Diffstat (limited to 'src/VBox/VMM/VMMAll')
47 files changed, 17780 insertions, 3342 deletions
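
The largest addition in this change is CPUMAllMsrs.cpp, which routes guest RDMSR/WRMSR accesses through per-MSR (or per-range) callback pairs of type FNCPUMRDMSR and FNCPUMWRMSR. As an orientation aid only — this is not code from the change itself — the sketch below shows the shape such a handler pair takes. The names cpumMsrRd_Example/cpumMsrWr_Example and the reserved-bit mask are hypothetical; the parameter lists, the pRange descriptor fields, and the VINF_SUCCESS / VERR_CPUM_RAISE_GP_0 return convention follow the typedefs and handlers visible in the diff below, and the sketch assumes the usual VirtualBox headers (VBox/vmm/cpum.h, CPUMInternal.h) for the types.

    /* Hypothetical handler pair following the FNCPUMRDMSR / FNCPUMWRMSR pattern
       used throughout CPUMAllMsrs.cpp; names and the mask are illustrative only. */
    static DECLCALLBACK(int) cpumMsrRd_Example(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
    {
        /* Many read handlers simply return the fixed/default value stored in the range descriptor. */
        *puValue = pRange->uValue;
        return VINF_SUCCESS;
    }

    static DECLCALLBACK(int) cpumMsrWr_Example(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
    {
        /* Illustrative reserved-bit check; real handlers rely on pRange->fWrGpMask plus per-MSR rules. */
        if (uValue & UINT64_C(0xffffffff00000000))
            return VERR_CPUM_RAISE_GP_0;    /* Caller injects #GP(0) into the guest. */
        /* A real handler would store the accepted value, e.g. in pVCpu->cpum.s.GuestMsrs. */
        return VINF_SUCCESS;
    }
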
diff --git a/src/VBox/VMM/VMMAll/CPUMAllA.asm b/src/VBox/VMM/VMMAll/CPUMAllA.asm deleted file mode 100644 index ac050d06..00000000 --- a/src/VBox/VMM/VMMAll/CPUMAllA.asm +++ /dev/null @@ -1,198 +0,0 @@ -; $Id: CPUMAllA.asm $ -;; @file -; CPUM - Guest Context Assembly Routines. -; - -; -; Copyright (C) 2006-2007 Oracle Corporation -; -; This file is part of VirtualBox Open Source Edition (OSE), as -; available from http://www.virtualbox.org. This file is free software; -; you can redistribute it and/or modify it under the terms of the GNU -; General Public License (GPL) as published by the Free Software -; Foundation, in version 2 as it comes in the "COPYING" file of the -; VirtualBox OSE distribution. VirtualBox OSE is distributed in the -; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. -; - -;******************************************************************************* -;* Header Files * -;******************************************************************************* -%include "VBox/asmdefs.mac" -%include "VBox/vmm/vm.mac" -%include "VBox/err.mac" -%include "VBox/vmm/stam.mac" -%include "CPUMInternal.mac" -%include "iprt/x86.mac" -%include "VBox/vmm/cpum.mac" - -%ifdef IN_RING3 - %error "The jump table doesn't link on leopard." -%endif - -; -; Enables write protection of Hypervisor memory pages. -; !note! Must be commented out for Trap8 debug handler. -; -%define ENABLE_WRITE_PROTECTION 1 - -BEGINCODE - - -;; -; Handles lazy FPU saving and restoring. -; -; This handler will implement lazy fpu (sse/mmx/stuff) saving. -; Two actions may be taken in this handler since the Guest OS may -; be doing lazy fpu switching. So, we'll have to generate those -; traps which the Guest CPU CTX shall have according to the -; its CR0 flags. If no traps for the Guest OS, we'll save the host -; context and restore the guest context. -; -; @returns 0 if caller should continue execution. -; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated. -; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer -; -align 16 -BEGINPROC cpumHandleLazyFPUAsm - ; - ; Figure out what to do. - ; - ; There are two basic actions: - ; 1. Save host fpu and restore guest fpu. - ; 2. Generate guest trap. - ; - ; When entering the hypervisor we'll always enable MP (for proper wait - ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag - ; is taken from the guest OS in order to get proper SSE handling. - ; - ; - ; Actions taken depending on the guest CR0 flags: - ; - ; 3 2 1 - ; TS | EM | MP | FPUInstr | WAIT :: VMM Action - ; ------------------------------------------------------------------------ - ; 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC. - ; 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC. - ; 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC; - ; 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC. - ; 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.) - ; 1 | 0 | 1 | #NM | #NM :: Go to host taking trap there. - ; 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.) - ; 1 | 1 | 1 | #NM | #NM :: Go to host taking trap there. - - ; - ; Before taking any of these actions we're checking if we have already - ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3. 
- ; -%ifdef RT_ARCH_AMD64 - %ifdef RT_OS_WINDOWS - mov xDX, rcx - %else - mov xDX, rdi - %endif -%else - mov xDX, dword [esp + 4] -%endif - test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU - jz hlfpua_not_loaded - jmp hlfpua_to_host - - ; - ; Take action. - ; -align 16 -hlfpua_not_loaded: - mov eax, [xDX + CPUMCPU.Guest.cr0] - and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS -%ifdef RT_ARCH_AMD64 - lea r8, [hlfpuajmp1 wrt rip] - jmp qword [rax*4 + r8] -%else - jmp dword [eax*2 + hlfpuajmp1] -%endif -align 16 -;; jump table using fpu related cr0 flags as index. -hlfpuajmp1: - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_to_host - RTCCPTR_DEF hlfpua_switch_fpu_ctx - RTCCPTR_DEF hlfpua_to_host -;; and mask for cr0. -hlfpu_afFlags: - RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) - RTCCPTR_DEF ~(X86_CR0_TS) - RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) - RTCCPTR_DEF ~(X86_CR0_TS) - RTCCPTR_DEF ~(X86_CR0_MP) - RTCCPTR_DEF 0 - RTCCPTR_DEF ~(X86_CR0_MP) - RTCCPTR_DEF 0 - - ; - ; Action - switch FPU context and change cr0 flags. - ; -align 16 -hlfpua_switch_fpu_ctx: -%ifndef IN_RING3 ; IN_RC or IN_RING0 - mov xCX, cr0 - %ifdef RT_ARCH_AMD64 - lea r8, [hlfpu_afFlags wrt rip] - and rcx, [rax*4 + r8] ; calc the new cr0 flags. - %else - and ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags. - %endif - mov xAX, cr0 - and xAX, ~(X86_CR0_TS | X86_CR0_EM) - mov cr0, xAX ; clear flags so we don't trap here. -%endif -%ifndef RT_ARCH_AMD64 - mov eax, edx ; Calculate the PCPUM pointer - sub eax, [edx + CPUMCPU.offCPUM] - test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR - jz short hlfpua_no_fxsave -%endif - - fxsave [xDX + CPUMCPU.Host.fpu] - or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) - fxrstor [xDX + CPUMCPU.Guest.fpu] -hlfpua_finished_switch: -%ifdef IN_RC - mov cr0, xCX ; load the new cr0 flags. -%endif - ; return continue execution. - xor eax, eax - ret - -%ifndef RT_ARCH_AMD64 -; legacy support. -hlfpua_no_fxsave: - fnsave [xDX + CPUMCPU.Host.fpu] - or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm - mov eax, [xDX + CPUMCPU.Guest.fpu] ; control word - not eax ; 1 means exception ignored (6 LS bits) - and eax, byte 03Fh ; 6 LS bits only - test eax, [xDX + CPUMCPU.Guest.fpu + 4] ; status word - jz short hlfpua_no_exceptions_pending - ; technically incorrect, but we certainly don't want any exceptions now!! - and dword [xDX + CPUMCPU.Guest.fpu + 4], ~03Fh -hlfpua_no_exceptions_pending: - frstor [xDX + CPUMCPU.Guest.fpu] - jmp near hlfpua_finished_switch -%endif ; !RT_ARCH_AMD64 - - - ; - ; Action - Generate Guest trap. - ; -hlfpua_action_4: -hlfpua_to_host: - mov eax, VINF_EM_RAW_GUEST_TRAP - ret -ENDPROC cpumHandleLazyFPUAsm - - diff --git a/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp b/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp new file mode 100644 index 00000000..c34a3d47 --- /dev/null +++ b/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp @@ -0,0 +1,5584 @@ +/* $Id: CPUMAllMsrs.cpp $ */ +/** @file + * CPUM - CPU MSR Registers. + */ + +/* + * Copyright (C) 2013 Oracle Corporation + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. 
This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License (GPL) as published by the Free Software + * Foundation, in version 2 as it comes in the "COPYING" file of the + * VirtualBox OSE distribution. VirtualBox OSE is distributed in the + * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. + */ + +/******************************************************************************* +* Header Files * +*******************************************************************************/ +#define LOG_GROUP LOG_GROUP_CPUM +#include <VBox/vmm/cpum.h> +#include <VBox/vmm/pdmapi.h> +#include <VBox/vmm/hm.h> +#include <VBox/vmm/tm.h> +#include "CPUMInternal.h" +#include <VBox/vmm/vm.h> +#include <VBox/err.h> + + +/******************************************************************************* +* Defined Constants And Macros * +*******************************************************************************/ +/** + * Validates the CPUMMSRRANGE::offCpumCpu value and declares a local variable + * pointing to it. + * + * ASSUMES sizeof(a_Type) is a power of two and that the member is aligned + * correctly. + */ +#define CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(a_pVCpu, a_pRange, a_Type, a_VarName) \ + AssertMsgReturn( (a_pRange)->offCpumCpu >= 8 \ + && (a_pRange)->offCpumCpu < sizeof(CPUMCPU) \ + && !((a_pRange)->offCpumCpu & (RT_MIN(sizeof(a_Type), 8) - 1)) \ + , ("offCpumCpu=%#x %s\n", (a_pRange)->offCpumCpu, (a_pRange)->szName), \ + VERR_CPUM_MSR_BAD_CPUMCPU_OFFSET); \ + a_Type *a_VarName = (a_Type *)((uintptr_t)&(a_pVCpu)->cpum.s + (a_pRange)->offCpumCpu) + + +/******************************************************************************* +* Structures and Typedefs * +*******************************************************************************/ + +/** + * Implements reading one or more MSRs. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR). + * + * @param pVCpu Pointer to the VMCPU. + * @param idMsr The MSR we're reading. + * @param pRange The MSR range descriptor. + * @param puValue Where to return the value. + */ +typedef DECLCALLBACK(int) FNCPUMRDMSR(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue); +/** Pointer to a RDMSR worker for a specific MSR or range of MSRs. */ +typedef FNCPUMRDMSR *PFNCPUMRDMSR; + + +/** + * Implements writing one or more MSRs. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_CPUM_RAISE_GP_0 on failure. + * + * @param pVCpu Pointer to the VMCPU. + * @param idMsr The MSR we're writing. + * @param pRange The MSR range descriptor. + * @param uValue The value to set, ignored bits masked. + * @param uRawValue The raw value with the ignored bits not masked. + */ +typedef DECLCALLBACK(int) FNCPUMWRMSR(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue); +/** Pointer to a WRMSR worker for a specific MSR or range of MSRs. */ +typedef FNCPUMWRMSR *PFNCPUMWRMSR; + + + +/* + * Generic functions. + * Generic functions. + * Generic functions. 
+ */ + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_FixedValue(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IgnoreWrite(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + Log(("CPUM: Ignoring WRMSR %#x (%s), %#llx\n", idMsr, pRange->szName, uValue)); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_WriteOnly(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_ReadOnly(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + Assert(pRange->fWrGpMask == UINT64_MAX); + return VERR_CPUM_RAISE_GP_0; +} + + + + +/* + * IA32 + * IA32 + * IA32 + */ + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32P5McAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; /** @todo implement machine check injection. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32P5McAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement machine check injection. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32P5McType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; /** @todo implement machine check injection. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32P5McType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement machine check injection. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32TimestampCounter(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = TMCpuTickGet(pVCpu); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32TimestampCounter(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PlatformId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + uint64_t uValue = pRange->uValue; + if (uValue & 0x1f00) + { + /* Max allowed bus ratio present. */ + /** @todo Implement scaled BUS frequency. 
*/ + } + + *puValue = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32ApicBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PVM pVM = pVCpu->CTX_SUFF(pVM); + if ( !pVM->cpum.s.GuestFeatures.fApic + && !pVM->cpum.s.GuestFeatures.fX2Apic) + { + Log(("CPUM: %s, apic not present -> GP\n", pRange->szName)); + return VERR_CPUM_RAISE_GP_0; + } + + *puValue = pVCpu->cpum.s.Guest.msrApicBase; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32ApicBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + int rc = PDMApicSetBase(pVCpu, uValue); + if (rc != VINF_SUCCESS) + rc = VERR_CPUM_RAISE_GP_0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 1; /* Locked, no VT-X, no SYSENTER micromanagement. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32BiosSignId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo fake microcode update. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32BiosSignId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* Normally, zero is written to Ia32BiosSignId before reading it in order + to select the signature instead of the BBL_CR_D3 behaviour. The GP mask + of the database entry should take care of most illegal writes for now, so + just ignore all writes atm. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32BiosUpdateTrigger(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Fake bios update trigger better. The value is the address to an + * update package, I think. We should probably GP if it's invalid. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo SMM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PmcN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo check CPUID leaf 0ah. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PmcN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo check CPUID leaf 0ah. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MonitorFilterLineSize(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo return 0x1000 if we try emulate mwait 100% correctly. */ + *puValue = 0x40; /** @todo Change to CPU cache line size. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MonitorFilterLineSize(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo should remember writes, though it's supposedly something only a BIOS + * would write so, it's not extremely important. */ + return VINF_SUCCESS; +} + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MPerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Read MPERF: Adjust against previously written MPERF value. Is TSC + * what we want? */ + *puValue = TMCpuTickGet(pVCpu); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MPerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Write MPERF: Calc adjustment. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32APerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Read APERF: Adjust against previously written MPERF value. Is TSC + * what we want? */ + *puValue = TMCpuTickGet(pVCpu); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32APerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Write APERF: Calc adjustment. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* This is currently a bit weird. :-) */ + uint8_t const cVariableRangeRegs = 0; + bool const fSystemManagementRangeRegisters = false; + bool const fFixedRangeRegisters = false; + bool const fWriteCombiningType = false; + *puValue = cVariableRangeRegs + | (fFixedRangeRegisters ? RT_BIT_64(8) : 0) + | (fWriteCombiningType ? RT_BIT_64(10) : 0) + | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrPhysBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement variable MTRR storage. */ + Assert(pRange->uValue == (idMsr - 0x200) / 2); + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrPhysBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* + * Validate the value. 
+ */ + Assert(pRange->uValue == (idMsr - 0x200) / 2); + + if ((uValue & 0xff) >= 7) + { + Log(("CPUM: Invalid type set writing MTRR PhysBase MSR %#x: %#llx (%#llx)\n", idMsr, uValue, uValue & 0xff)); + return VERR_CPUM_RAISE_GP_0; + } + + uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U); + if (fInvPhysMask & uValue) + { + Log(("CPUM: Invalid physical address bits set writing MTRR PhysBase MSR %#x: %#llx (%#llx)\n", + idMsr, uValue, uValue & fInvPhysMask)); + return VERR_CPUM_RAISE_GP_0; + } + + /* + * Store it. + */ + /** @todo Implement variable MTRR storage. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrPhysMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement variable MTRR storage. */ + Assert(pRange->uValue == (idMsr - 0x200) / 2); + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrPhysMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* + * Validate the value. + */ + Assert(pRange->uValue == (idMsr - 0x200) / 2); + + uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U); + if (fInvPhysMask & uValue) + { + Log(("CPUM: Invalid physical address bits set writing MTRR PhysMask MSR %#x: %#llx (%#llx)\n", + idMsr, uValue, uValue & fInvPhysMask)); + return VERR_CPUM_RAISE_GP_0; + } + + /* + * Store it. + */ + /** @todo Implement variable MTRR storage. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrFixed(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr); + *puValue = *puFixedMtrr; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrFixed(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr); + for (uint32_t cShift = 0; cShift < 63; cShift += 8) + { + uint8_t uType = (uint8_t)(uValue >> cShift); + if (uType >= 7) + { + Log(("CPUM: Invalid MTRR type at %u:%u in fixed range (%#x/%s): %#llx (%#llx)\n", + cShift + 7, cShift, idMsr, pRange->szName, uValue, uType)); + return VERR_CPUM_RAISE_GP_0; + } + } + *puFixedMtrr = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrDefType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrDefType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if ((uValue & 0xff) >= 7) + { + Log(("CPUM: Invalid MTRR default type value: %#llx (%#llx)\n", pRange->szName, uValue, uValue & 0xff)); + return VERR_CPUM_RAISE_GP_0; + } + + pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32Pat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrPAT; + return VINF_SUCCESS; +} + + +/** 
@callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32Pat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.msrPAT = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterCs(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.SysEnter.cs; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterCs(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* Note! We used to mask this by 0xffff, but turns out real HW doesn't and + there are generally 32-bit working bits backing this register. */ + pVCpu->cpum.s.Guest.SysEnter.cs = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterEsp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.SysEnter.esp; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterEsp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if (X86_IS_CANONICAL(uValue)) + { + pVCpu->cpum.s.Guest.SysEnter.esp = uValue; + return VINF_SUCCESS; + } + Log(("CPUM: IA32_SYSENTER_ESP not canonical! %#llx\n", uValue)); + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterEip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.SysEnter.eip; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterEip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if (X86_IS_CANONICAL(uValue)) + { + pVCpu->cpum.s.Guest.SysEnter.eip = uValue; + return VINF_SUCCESS; + } + Log(("CPUM: IA32_SYSENTER_EIP not canonical! %#llx\n", uValue)); + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32McgCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ +#if 0 /** @todo implement machine checks. */ + *puValue = pRange->uValue & (RT_BIT_64(8) | 0); +#else + *puValue = 0; +#endif + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32McgStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement machine checks. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32McgStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement machine checks. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32McgCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement machine checks. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32McgCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement machine checks. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32DebugCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_DEBUGCTL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32DebugCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_DEBUGCTL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SmrrPhysBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SmrrPhysBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement intel SMM. */ + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32SmrrPhysMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32SmrrPhysMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement intel SMM. */ + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PlatformDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel direct cache access (DCA)?? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PlatformDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement intel direct cache access (DCA)?? */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32CpuDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel direct cache access (DCA)?? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32Dca0Cap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel direct cache access (DCA)?? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32Dca0Cap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement intel direct cache access (DCA)?? */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_PERFEVTSEL0+. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_PERFEVTSEL0+. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + uint64_t uValue = pRange->uValue; + + /* Always provide the max bus ratio for now. XNU expects it. */ + uValue &= ~((UINT64_C(0x1f) << 40) | RT_BIT_64(46)); + + PVM pVM = pVCpu->CTX_SUFF(pVM); + uint64_t uScalableBusHz = CPUMGetGuestScalableBusFrequency(pVM); + uint64_t uTscHz = TMCpuTicksPerSecond(pVM); + uint8_t uTscRatio = (uint8_t)((uTscHz + uScalableBusHz / 2) / uScalableBusHz); + if (uTscRatio > 0x1f) + uTscRatio = 0x1f; + uValue |= (uint64_t)uTscRatio << 40; + + *puValue = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* Pentium4 allows writing, but all bits are ignored. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_PERFCTL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_PERFCTL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32FixedCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_FIXED_CTRn (fixed performance counters). */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32FixedCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_FIXED_CTRn (fixed performance counters). */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfCapabilities(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfCapabilities(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32FixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32FixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32PebsEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32PebsEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32ClockModulation(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_CLOCK_MODULATION. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32ClockModulation(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_CLOCK_MODULATION. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32ThermInterrupt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_THERM_INTERRUPT. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32ThermInterrupt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_THERM_STATUS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32ThermStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_THERM_STATUS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32ThermStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_THERM_INTERRUPT. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32Therm2Ctl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_THERM2_CTL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32Therm2Ctl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement IA32_THERM2_CTL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32MiscEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32MiscEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ +#ifdef LOG_ENABLED + uint64_t const uOld = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable; +#endif + + /* Unsupported bits are generally ignored and stripped by the MSR range + entry that got us here. So, we just need to preserve fixed bits. */ + pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue + | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL + | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; + + Log(("CPUM: IA32_MISC_ENABLE; old=%#llx written=%#llx => %#llx\n", + uOld, uValue, pVCpu->cpum.s.GuestMsrs.msr.MiscEnable)); + + /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */ + /** @todo Wire up MSR_IA32_MISC_ENABLE_XD_DISABLE. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32McCtlStatusAddrMiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement machine check exception injection. */ + switch (idMsr & 3) + { + case 0: + case 1: + *puValue = 0; + break; + + /* The ADDR and MISC registers aren't accessible since the + corresponding STATUS bits are zero. */ + case 2: + Log(("CPUM: Reading IA32_MCi_ADDR %#x -> #GP\n", idMsr)); + return VERR_CPUM_RAISE_GP_0; + case 3: + Log(("CPUM: Reading IA32_MCi_MISC %#x -> #GP\n", idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32McCtlStatusAddrMiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + switch (idMsr & 3) + { + case 0: + /* Ignore writes to the CTL register. */ + break; + + case 1: + /* According to specs, the STATUS register can only be written to + with the value 0. VBoxCpuReport thinks different for a + Pentium M Dothan, but implementing according to specs now. */ + if (uValue != 0) + { + Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_STATUS %#x -> #GP\n", uValue, idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + break; + + /* Specs states that ADDR and MISC can be cleared by writing zeros. + Writing 1s will GP. Need to figure out how this relates to the + ADDRV and MISCV status flags. If writing is independent of those + bits, we need to know whether the CPU really implements them since + that is exposed by writing 0 to them. + Implementing the solution with the fewer GPs for now. 
*/ + case 2: + if (uValue != 0) + { + Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_ADDR %#x -> #GP\n", uValue, idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + break; + case 3: + if (uValue != 0) + { + Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_MISC %#x -> #GP\n", uValue, idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + break; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32McNCtl2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement machine check exception injection. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32McNCtl2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Implement machine check exception injection. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32DsArea(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement IA32_DS_AREA. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32DsArea(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32TscDeadline(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement TSC deadline timer. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32TscDeadline(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement TSC deadline timer. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32X2ApicN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + int rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue); + if (rc != VINF_SUCCESS) + { + Log(("CPUM: X2APIC %#x read => %Rrc => #GP\n", idMsr, rc)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32X2ApicN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + int rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue); + if (rc != VINF_SUCCESS) + { + Log(("CPUM: X2APIC %#x write %#llx => %Rrc => #GP\n", idMsr, rc, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32DebugInterface(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo IA32_DEBUG_INTERFACE (no docs) */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Ia32DebugInterface(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo IA32_DEBUG_INTERFACE (no docs) */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxPinbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxProcbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxExitCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxEntryCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxMisc(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr0Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr0Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr4Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr4Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return 
VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxVmcsEnum(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxProcBasedCtls2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxEptVpidCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTruePinbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueProcbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueExitCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueEntryCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + + + + + + + + + +/* + * AMD64 + * AMD64 + * AMD64 + */ + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64Efer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrEFER; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64Efer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + PVM pVM = pVCpu->CTX_SUFF(pVM); + uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER; + uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 + ? pVM->cpum.s.aGuestCpuIdExt[1].edx + : 0; + uint64_t fMask = 0; + + /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */ + if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) + fMask |= MSR_K6_EFER_NXE; + if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) + fMask |= MSR_K6_EFER_LME; + if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) + fMask |= MSR_K6_EFER_SCE; + if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR) + fMask |= MSR_K6_EFER_FFXSR; + + /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if + paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */ + if ( (uOldEfer & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME) + && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)) + { + Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n")); + return VERR_CPUM_RAISE_GP_0; + } + + /* There are a few more: e.g. MSR_K6_EFER_LMSLE */ + AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)), + ("Unexpected value %RX64\n", uValue)); + pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask); + + /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB + if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. 
*/ + if ( (uOldEfer & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)) + != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))) + { + /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/); + HMFlushTLB(pVCpu); + + /* Notify PGM about NXE changes. */ + if ( (uOldEfer & MSR_K6_EFER_NXE) + != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE)) + PGMNotifyNxeChanged(pVCpu, !(uOldEfer & MSR_K6_EFER_NXE)); + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64SyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrSTAR; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64SyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.msrSTAR = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64LongSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrLSTAR; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64LongSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + pVCpu->cpum.s.Guest.msrLSTAR = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64CompSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrCSTAR; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64CompSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + pVCpu->cpum.s.Guest.msrCSTAR = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64SyscallFlagMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrSFMASK; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64SyscallFlagMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.msrSFMASK = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64FsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.fs.u64Base; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64FsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.fs.u64Base = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64GsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 
pVCpu->cpum.s.Guest.gs.u64Base; + return VINF_SUCCESS; +} + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64GsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.gs.u64Base = uValue; + return VINF_SUCCESS; +} + + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64KernelGsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE; + return VINF_SUCCESS; +} + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64KernelGsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_Amd64TscAux(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux; + return VINF_SUCCESS; +} + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_Amd64TscAux(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue; + return VINF_SUCCESS; +} + + +/* + * Intel specific + * Intel specific + * Intel specific + */ + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelEblCrPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo recalc clock frequency ratio? */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelEblCrPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Write EBL_CR_POWERON: Remember written bits. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7CoreThreadCount(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* Note! According to cpuid_set_info in XNU (10.7.0), Westmere CPU only + have a 4-bit core count. */ + uint16_t cCores = pVCpu->CTX_SUFF(pVM)->cCpus; + uint16_t cThreads = cCores; /** @todo hyper-threading. 
*/ + *puValue = RT_MAKE_U32(cThreads, cCores); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelP4EbcHardPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo P4 hard power on config */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelP4EbcHardPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo P4 hard power on config */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelP4EbcSoftPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo P4 soft power on config */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelP4EbcSoftPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo P4 soft power on config */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelP4EbcFrequencyId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + uint64_t uValue; + PVM pVM = pVCpu->CTX_SUFF(pVM); + uint64_t uScalableBusHz = CPUMGetGuestScalableBusFrequency(pVM); + if (pVM->cpum.s.GuestFeatures.uModel >= 2) + { + if (uScalableBusHz <= CPUM_SBUSFREQ_100MHZ && pVM->cpum.s.GuestFeatures.uModel <= 2) + { + uScalableBusHz = CPUM_SBUSFREQ_100MHZ; + uValue = 0; + } + else if (uScalableBusHz <= CPUM_SBUSFREQ_133MHZ) + { + uScalableBusHz = CPUM_SBUSFREQ_133MHZ; + uValue = 1; + } + else if (uScalableBusHz <= CPUM_SBUSFREQ_167MHZ) + { + uScalableBusHz = CPUM_SBUSFREQ_167MHZ; + uValue = 3; + } + else if (uScalableBusHz <= CPUM_SBUSFREQ_200MHZ) + { + uScalableBusHz = CPUM_SBUSFREQ_200MHZ; + uValue = 2; + } + else if (uScalableBusHz <= CPUM_SBUSFREQ_267MHZ && pVM->cpum.s.GuestFeatures.uModel > 2) + { + uScalableBusHz = CPUM_SBUSFREQ_267MHZ; + uValue = 0; + } + else + { + uScalableBusHz = CPUM_SBUSFREQ_333MHZ; + uValue = 6; + } + uValue <<= 16; + + uint64_t uTscHz = TMCpuTicksPerSecond(pVM); + uint8_t uTscRatio = (uint8_t)((uTscHz + uScalableBusHz / 2) / uScalableBusHz); + uValue |= (uint32_t)uTscRatio << 24; + + uValue |= pRange->uValue & ~UINT64_C(0xff0f0000); + } + else + { + /* Probably more stuff here, but intel doesn't want to tell us. */ + uValue = pRange->uValue; + uValue &= ~(RT_BIT_64(21) | RT_BIT_64(22) | RT_BIT_64(23)); /* 100 MHz is only documented value */ + } + + *puValue = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelP4EbcFrequencyId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo P4 bus frequency config */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelP6FsbFrequency(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* Convert the scalable bus frequency to the encoding in the intel manual (for core+). 
*/ + uint64_t uScalableBusHz = CPUMGetGuestScalableBusFrequency(pVCpu->CTX_SUFF(pVM)); + if (uScalableBusHz <= CPUM_SBUSFREQ_100MHZ) + *puValue = 5; + else if (uScalableBusHz <= CPUM_SBUSFREQ_133MHZ) + *puValue = 1; + else if (uScalableBusHz <= CPUM_SBUSFREQ_167MHZ) + *puValue = 3; + else if (uScalableBusHz <= CPUM_SBUSFREQ_200MHZ) + *puValue = 2; + else if (uScalableBusHz <= CPUM_SBUSFREQ_267MHZ) + *puValue = 0; + else if (uScalableBusHz <= CPUM_SBUSFREQ_333MHZ) + *puValue = 4; + else /*if (uScalableBusHz <= CPUM_SBUSFREQ_400MHZ)*/ + *puValue = 6; + + *puValue |= pRange->uValue & ~UINT64_C(0x7); + + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelPlatformInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* Just indicate a fixed TSC, no turbo boost, no programmable anything. */ + PVM pVM = pVCpu->CTX_SUFF(pVM); + uint64_t uScalableBusHz = CPUMGetGuestScalableBusFrequency(pVM); + uint64_t uTscHz = TMCpuTicksPerSecond(pVM); + uint8_t uTscRatio = (uint8_t)((uTscHz + uScalableBusHz / 2) / uScalableBusHz); + uint64_t uValue = ((uint32_t)uTscRatio << 8) /* TSC invariant frequency. */ + | ((uint64_t)uTscRatio << 40); /* The max turbo frequency. */ + + /* Ivy bridge has a minimum operating ratio as well. */ + if (true) /** @todo detect sandy bridge. */ + uValue |= (uint64_t)uTscRatio << 48; + + *puValue = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelFlexRatio(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + uint64_t uValue = pRange->uValue & ~UINT64_C(0x1ff00); + + PVM pVM = pVCpu->CTX_SUFF(pVM); + uint64_t uScalableBusHz = CPUMGetGuestScalableBusFrequency(pVM); + uint64_t uTscHz = TMCpuTicksPerSecond(pVM); + uint8_t uTscRatio = (uint8_t)((uTscHz + uScalableBusHz / 2) / uScalableBusHz); + uValue |= (uint32_t)uTscRatio << 8; + + *puValue = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelFlexRatio(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement writing MSR_FLEX_RATIO. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelPkgCStConfigControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelPkgCStConfigControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15)) + { + Log(("CPUM: WRMDR %#x (%s), %#llx: Write protected -> #GP\n", idMsr, pRange->szName, uValue)); + return VERR_CPUM_RAISE_GP_0; + } +#if 0 /** @todo check what real (old) hardware does. */ + if ((uValue & 7) >= 5) + { + Log(("CPUM: WRMDR %#x (%s), %#llx: Invalid limit (%d) -> #GP\n", idMsr, pRange->szName, uValue, (uint32_t)(uValue & 7))); + return VERR_CPUM_RAISE_GP_0; + } +#endif + pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelPmgIoCaptureBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement I/O mwait wakeup. 
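 *        Editor's note on the PLATFORM_INFO / FLEX_RATIO readers above -
 *        illustrative sketch, not part of this patch: the same TSC ratio is
 *        replicated into several byte-wide fields.  Assuming a ratio of 24
 *        (0x18):
 *
 *            uint8_t  uTscRatio = 0x18;
 *            uint64_t uValue    = ((uint32_t)uTscRatio << 8)   // max non-turbo
 *                               | ((uint64_t)uTscRatio << 40)  // max turbo
 *                               | ((uint64_t)uTscRatio << 48); // min operating
 *            // uValue == UINT64_C(0x0018180000001800)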
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelPmgIoCaptureBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement I/O mwait wakeup. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchFromToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchFromToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last branch records. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchFromN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchFromN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last branch records. */ + /** @todo Probing indicates that bit 63 is settable on SandyBridge, at least + * if the rest of the bits are zero. Automatic sign extending? + * Investigate! */ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last branch records. */ + /** @todo Probing indicates that bit 63 is settable on SandyBridge, at least + * if the rest of the bits are zero. Automatic sign extending? + * Investigate! */ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchTos(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchTos(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last branch records. 
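 *        Editor's note - illustrative sketch, not part of this patch: the
 *        canonical check used by the LASTBRANCH_n_FROM/TO writers above
 *        rejects any value whose upper bits are not a sign extension of
 *        bit 47 (assuming a 48-bit implementation):
 *
 *            Assert( X86_IS_CANONICAL(UINT64_C(0x00007fffffffffff)));  // ok
 *            Assert( X86_IS_CANONICAL(UINT64_C(0xffff800000000000)));  // ok
 *            Assert(!X86_IS_CANONICAL(UINT64_C(0x0000800000000000)));  // would #GP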
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelBblCrCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelBblCrCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelBblCrCtl3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelBblCrCtl3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7TemperatureTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7TemperatureTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7MsrOffCoreResponseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo machine check. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7MsrOffCoreResponseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo machine check. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7MiscPwrMgmt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7MiscPwrMgmt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelP6CrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + int rc = CPUMGetGuestCRx(pVCpu, pRange->uValue, puValue); + AssertRC(rc); + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelP6CrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* This CRx interface differs from the MOV CRx, GReg interface in that + #GP(0) isn't raised if unsupported bits are written to. Instead they + are simply ignored and masked off. (Pentium M Dothan) */ + /** @todo Implement MSR_P6_CRx writing. Too much effort for very little, if + * any, gain. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCpuId1FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement CPUID masking. 
*/ + *puValue = UINT64_MAX; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCpuId1FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement CPUID masking. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCpuId1FeatureMaskEax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement CPUID masking. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCpuId1FeatureMaskEax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement CPUID masking. */ + return VINF_SUCCESS; +} + + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCpuId80000001FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement CPUID masking. */ + *puValue = UINT64_MAX; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCpuId80000001FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement CPUID masking. */ + return VINF_SUCCESS; +} + + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyAesNiCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement AES-NI. */ + *puValue = 3; /* Bit 0 is lock bit, bit 1 disables AES-NI. That's what they say. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyAesNiCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement AES-NI. */ + return VERR_CPUM_RAISE_GP_0; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7TurboRatioLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement intel C states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7TurboRatioLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement intel C states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7LbrSelect(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last-branch-records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7LbrSelect(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last-branch-records. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyErrorControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement memory error injection (MSR_ERROR_CONTROL). 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyErrorControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement memory error injection (MSR_ERROR_CONTROL). */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7VirtualLegacyWireCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement memory VLW? */ + *puValue = pRange->uValue; + /* Note: A20M is known to be bit 1 as this was disclosed in spec update + AAJ49/AAK51/????, which documents the inversion of this bit. The + Sandy bridge CPU here has value 0x74, so it probably doesn't have a BIOS + that correct things. Some guesses at the other bits: + bit 2 = INTR + bit 4 = SMI + bit 5 = INIT + bit 6 = NMI */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7PowerCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7PowerCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel power management */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPebsNumAlt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyPebsNumAlt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7PebsLdLat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7PebsLdLat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7PkgCnResidencyN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7CoreCnResidencyN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyVrCurrentConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyVrCurrentConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyVrMiscConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyVrMiscConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyRaplPowerUnit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPkgCnIrtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyPkgCnIrtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel power management. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPkgC2Residency(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPkgPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgEnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPowerInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplDramPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramEnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPowerInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp0PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0EnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp0Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0PerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp1PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1EnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel RAPL. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp1Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel RAPL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7IvyConfigTdpNominal(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7IvyConfigTdpLevel1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7IvyConfigTdpLevel2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7IvyConfigTdpControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7IvyConfigTdpControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel power management. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7IvyTurboActivationRatio(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo intel power management. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7IvyTurboActivationRatio(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo intel power management. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncPerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncPerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncPerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncPerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncPerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncPerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncPerfFixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncPerfFixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncPerfFixedCtr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncPerfFixedCtr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncCBoxConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncArbPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncArbPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelI7UncArbPerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo uncore msrs. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelI7UncArbPerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo uncore msrs. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCore2EmttmCrTablesN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement enhanced multi thread termal monitoring? */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCore2EmttmCrTablesN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement enhanced multi thread termal monitoring? 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCore2SmmCStMiscInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo SMM & C-states? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCore2SmmCStMiscInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo SMM & C-states? */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCore1ExtConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Core1&2 EXT_CONFIG (whatever that is)? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCore1ExtConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Core1&2 EXT_CONFIG (whatever that is)? */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCore1DtsCalControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Core1&2(?) DTS_CAL_CTRL (whatever that is)? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCore1DtsCalControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Core1&2(?) DTS_CAL_CTRL (whatever that is)? */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_IntelCore2PeciControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Core2+ platform environment control interface control register? */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_IntelCore2PeciControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Core2+ platform environment control interface control register? */ + return VINF_SUCCESS; +} + + + +/* + * Multiple vendor P6 MSRs. + * Multiple vendor P6 MSRs. + * Multiple vendor P6 MSRs. + * + * These MSRs were introduced with the P6 but not elevated to architectural + * MSRs, despite other vendors implementing them. + */ + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_P6LastBranchFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* AMD seems to just record RIP, while intel claims to record RIP+CS.BASE + if I read the docs correctly, thus the need for separate functions. */ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_P6LastBranchToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last branch records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_P6LastIntFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last exception records. 
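 *        Editor's note - illustrative sketch, not part of this patch: the
 *        AMD vs. Intel difference noted for the LastBranchFromIp reader
 *        above is offset (RIP) vs. linear address (CS.base + RIP).  With
 *        assumed example values in a legacy-mode segment:
 *
 *            uint64_t uCsBase = UINT64_C(0x00010000);
 *            uint64_t uRip    = UINT64_C(0x00001234);
 *            uint64_t uLinear = uCsBase + uRip;   // 0x00011234
 *
 *        In 64-bit mode CS.base is treated as zero, so the two forms match.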
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_P6LastIntFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last exception records. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_P6LastIntToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo implement last exception records. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_P6LastIntToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo implement last exception records. */ + return VINF_SUCCESS; +} + + + +/* + * AMD specific + * AMD specific + * AMD specific + */ + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hTscRate(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement TscRateMsr */ + *puValue = RT_MAKE_U64(0, 1); /* 1.0 = reset value. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hTscRate(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Implement TscRateMsr */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLwpCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */ + /* Note: Only listes in BKDG for Family 15H. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLwpCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLwpCbAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */ + /* Note: Only listes in BKDG for Family 15H. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLwpCbAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hMc4MiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo machine check. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hMc4MiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo machine check. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8PerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD performance events. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8PerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD performance events. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8PerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD performance events. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8PerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD performance events. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SysCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SYS_CFG */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SysCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SYS_CFG */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8HwCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD HW_CFG */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8HwCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD HW_CFG */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8IorrBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IorrMask/IorrBase */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8IorrBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IorrMask/IorrBase */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8IorrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IorrMask/IorrBase */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8IorrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IorrMask/IorrBase */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8TopOfMemN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + *puValue = 0; + /** @todo return 4GB - RamHoleSize here for TOPMEM. Figure out what to return + * for TOPMEM2. */ + //if (pRange->uValue == 0) + // *puValue = _4G - RamHoleSize; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8TopOfMemN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD TOPMEM and TOPMEM2/TOM2. 
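 *        Editor's note - illustrative sketch, not part of this patch: the
 *        commented-out read path above would report the top of RAM below
 *        4 GiB.  Assuming a hypothetical 512 MiB PCI/MMIO hole (RamHoleSize
 *        is a placeholder name from the todo, not an existing variable):
 *
 *            uint64_t const cbRamHole = 512 * _1M;
 *            uint64_t const uTopMem   = _4G - cbRamHole;   // 0xe0000000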
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8NbCfg1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD NB_CFG1 */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8NbCfg1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD NB_CFG1 */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8McXcptRedir(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo machine check. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8McXcptRedir(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo machine check. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuNameN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), pRange->uValue / 2 + 0x80000001, 0); + if (pLeaf) + { + if (!(pRange->uValue & 1)) + *puValue = RT_MAKE_U64(pLeaf->uEax, pLeaf->uEbx); + else + *puValue = RT_MAKE_U64(pLeaf->uEcx, pLeaf->uEdx); + } + else + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuNameN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Remember guest programmed CPU name. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8HwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD HTC. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8HwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD HTC. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD STC. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD STC. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8FidVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD FIDVID_CTL. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8FidVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD FIDVID_CTL. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8FidVidStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD FIDVID_STATUS. 
*/ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8McCtlMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD MC. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8McCtlMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD MC. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiOnIoTrapN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM/SMI and I/O trap. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiOnIoTrapN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM/SMI and I/O trap. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiOnIoTrapCtlSts(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM/SMI and I/O trap. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiOnIoTrapCtlSts(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM/SMI and I/O trap. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8IntPendingMessage(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Interrupt pending message. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8IntPendingMessage(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Interrupt pending message. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiTriggerIoCycle(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM/SMI and trigger I/O cycle. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiTriggerIoCycle(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM/SMI and trigger I/O cycle. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hMmioCfgBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD MMIO Configuration base address. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hMmioCfgBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD MMIO Configuration base address. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hTrapCtlMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD 0xc0010059. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hTrapCtlMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD 0xc0010059. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateCurLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD P-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD P-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD P-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCofVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCofVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD P-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCofVidStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD P-states. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCofVidStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* Note! Writing 0 seems to not GP, not sure if it does anything to the value... */ + /** @todo AMD P-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCStateIoBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD C-states. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCStateIoBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD C-states. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCpuWatchdogTimer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD machine checks. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCpuWatchdogTimer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD machine checks. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM. */ + return VINF_SUCCESS; +} + + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SVM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SVM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8IgnNe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IGNNE\# control. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8IgnNe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IGNNE\# control. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SVM. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SVM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hVmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SVM. */ + *puValue = 0; /* RAZ */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hVmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SVM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hSmmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM. */ + *puValue = 0; /* RAZ */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hSmmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hLocalSmiStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD SMM/SMI. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hLocalSmiStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD SMM/SMI. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hOsVisWrkIdLength(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD OS visible workaround. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hOsVisWrkIdLength(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD OS visible workaround. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hOsVisWrkStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD OS visible workaround. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hOsVisWrkStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD OS visible workaround. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam16hL2IPerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD L2I performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam16hL2IPerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD L2I performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam16hL2IPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD L2I performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam16hL2IPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD L2I performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hNorthbridgePerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD Northbridge performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hNorthbridgePerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD Northbridge performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hNorthbridgePerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD Northbridge performance counters. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hNorthbridgePerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD Northbridge performance counters. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7MicrocodeCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocumented register only seen mentioned in fam15h erratum \#608. */ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7MicrocodeCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocumented register only seen mentioned in fam15h erratum \#608. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7ClusterIdMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocumented register only seen mentioned in fam16h BKDG r3.00 when + * describing EBL_CR_POWERON. 
*/ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7ClusterIdMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocumented register only seen mentioned in fam16h BKDG r3.00 when + * describing EBL_CR_POWERON. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd07hEbax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000007, 0); + if (pLeaf) + *puValue = RT_MAKE_U64(pLeaf->uEbx, pLeaf->uEax); + else + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd07hEbax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Changing CPUID leaf 7/0. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd06hEcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000006, 0); + if (pLeaf) + *puValue = pLeaf->uEcx; + else + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd06hEcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Changing CPUID leaf 6. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000001, 0); + if (pLeaf) + *puValue = RT_MAKE_U64(pLeaf->uEdx, pLeaf->uEcx); + else + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Changing CPUID leaf 0x80000001. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlExt01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x80000001, 0); + if (pLeaf) + *puValue = RT_MAKE_U64(pLeaf->uEdx, pLeaf->uEcx); + else + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlExt01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Changing CPUID leaf 0x80000001. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK8PatchLevel(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Fake AMD microcode patching. 
*/ + *puValue = pRange->uValue; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK8PatchLoader(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Fake AMD microcode patching. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7DebugStatusMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7DebugStatusMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTraceBaseMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTraceBaseMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTracePtrMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTracePtrMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTraceLimitMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTraceLimitMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. 
*/ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7HardwareDebugToolCfgMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7HardwareDebugToolCfgMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7FastFlushCountMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7FastFlushCountMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo undocumented */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7NodeId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD node ID and bios scratch. */ + *puValue = 0; /* nodeid = 0; nodes-per-cpu = 1 */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7NodeId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD node ID and bios scratch. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7DrXAddrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD DRx address masking (range breakpoints). */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7DrXAddrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD DRx address masking (range breakpoints). 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7Dr0DataMatchMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD undocument debugging features. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7Dr0DataMatchMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD undocument debugging features. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7Dr0DataMaskMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD undocument debugging features. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7Dr0DataMaskMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD undocument debugging features. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7LoadStoreCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD load-store config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7LoadStoreCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD load-store config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7InstrCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD instruction cache config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7InstrCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD instruction cache config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7DataCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. 
Need to be explored and verify K7 presence. */ + /** @todo AMD data cache config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7DataCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD data cache config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7BusUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD bus unit config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7BusUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo AMD bus unit config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdK7DebugCtl2Maybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocument AMD debug control register \#2. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdK7DebugCtl2Maybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older + * cpus. Need to be explored and verify K7 presence. */ + /** @todo Undocument AMD debug control register \#2. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hFpuCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD FPU config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hFpuCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD FPU config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hDecoderCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD decoder config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hDecoderCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD decoder config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hBusUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /* Note! 10h and 16h */ + /** @todo AMD bus unit config. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hBusUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /* Note! 10h and 16h */ + /** @todo AMD bus unit config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD unit config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD unit config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD unit config 2. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD unit config 2. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD combined unit config 3. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD combined unit config 3. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hExecUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD execution unit config. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hExecUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD execution unit config. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLoadStoreCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD load-store config 2. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLoadStoreCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD load-store config 2. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. 
*/ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpExecCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpExecCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpRip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpRip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. 
*/ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsDcLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsDcLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsDcPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsDcPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMRDMSR} */ +static DECLCALLBACK(int) cpumMsrRd_AmdFam14hIbsBrTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) +{ + /** @todo AMD IBS. */ + *puValue = 0; + return VINF_SUCCESS; +} + + +/** @callback_method_impl{FNCPUMWRMSR} */ +static DECLCALLBACK(int) cpumMsrWr_AmdFam14hIbsBrTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) +{ + /** @todo AMD IBS. */ + if (!X86_IS_CANONICAL(uValue)) + { + Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue)); + return VERR_CPUM_RAISE_GP_0; + } + return VINF_SUCCESS; +} + + + +/** + * MSR read function table. 
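+ *
+ * The table is indexed by CPUMMSRRDFN, i.e. the enmRdFn member of the MSR
+ * range, so the entries must stay in exactly the same order as the enum.
+ * Strict ring-3 builds verify this in cpumR3MsrStrictInitChecks() below.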
+ */ +static const PFNCPUMRDMSR g_aCpumRdMsrFns[kCpumMsrRdFn_End] = +{ + NULL, /* Invalid */ + cpumMsrRd_FixedValue, + NULL, /* Alias */ + cpumMsrRd_WriteOnly, + cpumMsrRd_Ia32P5McAddr, + cpumMsrRd_Ia32P5McType, + cpumMsrRd_Ia32TimestampCounter, + cpumMsrRd_Ia32PlatformId, + cpumMsrRd_Ia32ApicBase, + cpumMsrRd_Ia32FeatureControl, + cpumMsrRd_Ia32BiosSignId, + cpumMsrRd_Ia32SmmMonitorCtl, + cpumMsrRd_Ia32PmcN, + cpumMsrRd_Ia32MonitorFilterLineSize, + cpumMsrRd_Ia32MPerf, + cpumMsrRd_Ia32APerf, + cpumMsrRd_Ia32MtrrCap, + cpumMsrRd_Ia32MtrrPhysBaseN, + cpumMsrRd_Ia32MtrrPhysMaskN, + cpumMsrRd_Ia32MtrrFixed, + cpumMsrRd_Ia32MtrrDefType, + cpumMsrRd_Ia32Pat, + cpumMsrRd_Ia32SysEnterCs, + cpumMsrRd_Ia32SysEnterEsp, + cpumMsrRd_Ia32SysEnterEip, + cpumMsrRd_Ia32McgCap, + cpumMsrRd_Ia32McgStatus, + cpumMsrRd_Ia32McgCtl, + cpumMsrRd_Ia32DebugCtl, + cpumMsrRd_Ia32SmrrPhysBase, + cpumMsrRd_Ia32SmrrPhysMask, + cpumMsrRd_Ia32PlatformDcaCap, + cpumMsrRd_Ia32CpuDcaCap, + cpumMsrRd_Ia32Dca0Cap, + cpumMsrRd_Ia32PerfEvtSelN, + cpumMsrRd_Ia32PerfStatus, + cpumMsrRd_Ia32PerfCtl, + cpumMsrRd_Ia32FixedCtrN, + cpumMsrRd_Ia32PerfCapabilities, + cpumMsrRd_Ia32FixedCtrCtrl, + cpumMsrRd_Ia32PerfGlobalStatus, + cpumMsrRd_Ia32PerfGlobalCtrl, + cpumMsrRd_Ia32PerfGlobalOvfCtrl, + cpumMsrRd_Ia32PebsEnable, + cpumMsrRd_Ia32ClockModulation, + cpumMsrRd_Ia32ThermInterrupt, + cpumMsrRd_Ia32ThermStatus, + cpumMsrRd_Ia32Therm2Ctl, + cpumMsrRd_Ia32MiscEnable, + cpumMsrRd_Ia32McCtlStatusAddrMiscN, + cpumMsrRd_Ia32McNCtl2, + cpumMsrRd_Ia32DsArea, + cpumMsrRd_Ia32TscDeadline, + cpumMsrRd_Ia32X2ApicN, + cpumMsrRd_Ia32DebugInterface, + cpumMsrRd_Ia32VmxBase, + cpumMsrRd_Ia32VmxPinbasedCtls, + cpumMsrRd_Ia32VmxProcbasedCtls, + cpumMsrRd_Ia32VmxExitCtls, + cpumMsrRd_Ia32VmxEntryCtls, + cpumMsrRd_Ia32VmxMisc, + cpumMsrRd_Ia32VmxCr0Fixed0, + cpumMsrRd_Ia32VmxCr0Fixed1, + cpumMsrRd_Ia32VmxCr4Fixed0, + cpumMsrRd_Ia32VmxCr4Fixed1, + cpumMsrRd_Ia32VmxVmcsEnum, + cpumMsrRd_Ia32VmxProcBasedCtls2, + cpumMsrRd_Ia32VmxEptVpidCap, + cpumMsrRd_Ia32VmxTruePinbasedCtls, + cpumMsrRd_Ia32VmxTrueProcbasedCtls, + cpumMsrRd_Ia32VmxTrueExitCtls, + cpumMsrRd_Ia32VmxTrueEntryCtls, + + cpumMsrRd_Amd64Efer, + cpumMsrRd_Amd64SyscallTarget, + cpumMsrRd_Amd64LongSyscallTarget, + cpumMsrRd_Amd64CompSyscallTarget, + cpumMsrRd_Amd64SyscallFlagMask, + cpumMsrRd_Amd64FsBase, + cpumMsrRd_Amd64GsBase, + cpumMsrRd_Amd64KernelGsBase, + cpumMsrRd_Amd64TscAux, + + cpumMsrRd_IntelEblCrPowerOn, + cpumMsrRd_IntelI7CoreThreadCount, + cpumMsrRd_IntelP4EbcHardPowerOn, + cpumMsrRd_IntelP4EbcSoftPowerOn, + cpumMsrRd_IntelP4EbcFrequencyId, + cpumMsrRd_IntelP6FsbFrequency, + cpumMsrRd_IntelPlatformInfo, + cpumMsrRd_IntelFlexRatio, + cpumMsrRd_IntelPkgCStConfigControl, + cpumMsrRd_IntelPmgIoCaptureBase, + cpumMsrRd_IntelLastBranchFromToN, + cpumMsrRd_IntelLastBranchFromN, + cpumMsrRd_IntelLastBranchToN, + cpumMsrRd_IntelLastBranchTos, + cpumMsrRd_IntelBblCrCtl, + cpumMsrRd_IntelBblCrCtl3, + cpumMsrRd_IntelI7TemperatureTarget, + cpumMsrRd_IntelI7MsrOffCoreResponseN, + cpumMsrRd_IntelI7MiscPwrMgmt, + cpumMsrRd_IntelP6CrN, + cpumMsrRd_IntelCpuId1FeatureMaskEcdx, + cpumMsrRd_IntelCpuId1FeatureMaskEax, + cpumMsrRd_IntelCpuId80000001FeatureMaskEcdx, + cpumMsrRd_IntelI7SandyAesNiCtl, + cpumMsrRd_IntelI7TurboRatioLimit, + cpumMsrRd_IntelI7LbrSelect, + cpumMsrRd_IntelI7SandyErrorControl, + cpumMsrRd_IntelI7VirtualLegacyWireCap, + cpumMsrRd_IntelI7PowerCtl, + cpumMsrRd_IntelI7SandyPebsNumAlt, + cpumMsrRd_IntelI7PebsLdLat, + cpumMsrRd_IntelI7PkgCnResidencyN, + cpumMsrRd_IntelI7CoreCnResidencyN, 
+ cpumMsrRd_IntelI7SandyVrCurrentConfig, + cpumMsrRd_IntelI7SandyVrMiscConfig, + cpumMsrRd_IntelI7SandyRaplPowerUnit, + cpumMsrRd_IntelI7SandyPkgCnIrtlN, + cpumMsrRd_IntelI7SandyPkgC2Residency, + cpumMsrRd_IntelI7RaplPkgPowerLimit, + cpumMsrRd_IntelI7RaplPkgEnergyStatus, + cpumMsrRd_IntelI7RaplPkgPerfStatus, + cpumMsrRd_IntelI7RaplPkgPowerInfo, + cpumMsrRd_IntelI7RaplDramPowerLimit, + cpumMsrRd_IntelI7RaplDramEnergyStatus, + cpumMsrRd_IntelI7RaplDramPerfStatus, + cpumMsrRd_IntelI7RaplDramPowerInfo, + cpumMsrRd_IntelI7RaplPp0PowerLimit, + cpumMsrRd_IntelI7RaplPp0EnergyStatus, + cpumMsrRd_IntelI7RaplPp0Policy, + cpumMsrRd_IntelI7RaplPp0PerfStatus, + cpumMsrRd_IntelI7RaplPp1PowerLimit, + cpumMsrRd_IntelI7RaplPp1EnergyStatus, + cpumMsrRd_IntelI7RaplPp1Policy, + cpumMsrRd_IntelI7IvyConfigTdpNominal, + cpumMsrRd_IntelI7IvyConfigTdpLevel1, + cpumMsrRd_IntelI7IvyConfigTdpLevel2, + cpumMsrRd_IntelI7IvyConfigTdpControl, + cpumMsrRd_IntelI7IvyTurboActivationRatio, + cpumMsrRd_IntelI7UncPerfGlobalCtrl, + cpumMsrRd_IntelI7UncPerfGlobalStatus, + cpumMsrRd_IntelI7UncPerfGlobalOvfCtrl, + cpumMsrRd_IntelI7UncPerfFixedCtrCtrl, + cpumMsrRd_IntelI7UncPerfFixedCtr, + cpumMsrRd_IntelI7UncCBoxConfig, + cpumMsrRd_IntelI7UncArbPerfCtrN, + cpumMsrRd_IntelI7UncArbPerfEvtSelN, + cpumMsrRd_IntelCore2EmttmCrTablesN, + cpumMsrRd_IntelCore2SmmCStMiscInfo, + cpumMsrRd_IntelCore1ExtConfig, + cpumMsrRd_IntelCore1DtsCalControl, + cpumMsrRd_IntelCore2PeciControl, + + cpumMsrRd_P6LastBranchFromIp, + cpumMsrRd_P6LastBranchToIp, + cpumMsrRd_P6LastIntFromIp, + cpumMsrRd_P6LastIntToIp, + + cpumMsrRd_AmdFam15hTscRate, + cpumMsrRd_AmdFam15hLwpCfg, + cpumMsrRd_AmdFam15hLwpCbAddr, + cpumMsrRd_AmdFam10hMc4MiscN, + cpumMsrRd_AmdK8PerfCtlN, + cpumMsrRd_AmdK8PerfCtrN, + cpumMsrRd_AmdK8SysCfg, + cpumMsrRd_AmdK8HwCr, + cpumMsrRd_AmdK8IorrBaseN, + cpumMsrRd_AmdK8IorrMaskN, + cpumMsrRd_AmdK8TopOfMemN, + cpumMsrRd_AmdK8NbCfg1, + cpumMsrRd_AmdK8McXcptRedir, + cpumMsrRd_AmdK8CpuNameN, + cpumMsrRd_AmdK8HwThermalCtrl, + cpumMsrRd_AmdK8SwThermalCtrl, + cpumMsrRd_AmdK8FidVidControl, + cpumMsrRd_AmdK8FidVidStatus, + cpumMsrRd_AmdK8McCtlMaskN, + cpumMsrRd_AmdK8SmiOnIoTrapN, + cpumMsrRd_AmdK8SmiOnIoTrapCtlSts, + cpumMsrRd_AmdK8IntPendingMessage, + cpumMsrRd_AmdK8SmiTriggerIoCycle, + cpumMsrRd_AmdFam10hMmioCfgBaseAddr, + cpumMsrRd_AmdFam10hTrapCtlMaybe, + cpumMsrRd_AmdFam10hPStateCurLimit, + cpumMsrRd_AmdFam10hPStateControl, + cpumMsrRd_AmdFam10hPStateStatus, + cpumMsrRd_AmdFam10hPStateN, + cpumMsrRd_AmdFam10hCofVidControl, + cpumMsrRd_AmdFam10hCofVidStatus, + cpumMsrRd_AmdFam10hCStateIoBaseAddr, + cpumMsrRd_AmdFam10hCpuWatchdogTimer, + cpumMsrRd_AmdK8SmmBase, + cpumMsrRd_AmdK8SmmAddr, + cpumMsrRd_AmdK8SmmMask, + cpumMsrRd_AmdK8VmCr, + cpumMsrRd_AmdK8IgnNe, + cpumMsrRd_AmdK8SmmCtl, + cpumMsrRd_AmdK8VmHSavePa, + cpumMsrRd_AmdFam10hVmLockKey, + cpumMsrRd_AmdFam10hSmmLockKey, + cpumMsrRd_AmdFam10hLocalSmiStatus, + cpumMsrRd_AmdFam10hOsVisWrkIdLength, + cpumMsrRd_AmdFam10hOsVisWrkStatus, + cpumMsrRd_AmdFam16hL2IPerfCtlN, + cpumMsrRd_AmdFam16hL2IPerfCtrN, + cpumMsrRd_AmdFam15hNorthbridgePerfCtlN, + cpumMsrRd_AmdFam15hNorthbridgePerfCtrN, + cpumMsrRd_AmdK7MicrocodeCtl, + cpumMsrRd_AmdK7ClusterIdMaybe, + cpumMsrRd_AmdK8CpuIdCtlStd07hEbax, + cpumMsrRd_AmdK8CpuIdCtlStd06hEcx, + cpumMsrRd_AmdK8CpuIdCtlStd01hEdcx, + cpumMsrRd_AmdK8CpuIdCtlExt01hEdcx, + cpumMsrRd_AmdK8PatchLevel, + cpumMsrRd_AmdK7DebugStatusMaybe, + cpumMsrRd_AmdK7BHTraceBaseMaybe, + cpumMsrRd_AmdK7BHTracePtrMaybe, + cpumMsrRd_AmdK7BHTraceLimitMaybe, + cpumMsrRd_AmdK7HardwareDebugToolCfgMaybe, + 
cpumMsrRd_AmdK7FastFlushCountMaybe, + cpumMsrRd_AmdK7NodeId, + cpumMsrRd_AmdK7DrXAddrMaskN, + cpumMsrRd_AmdK7Dr0DataMatchMaybe, + cpumMsrRd_AmdK7Dr0DataMaskMaybe, + cpumMsrRd_AmdK7LoadStoreCfg, + cpumMsrRd_AmdK7InstrCacheCfg, + cpumMsrRd_AmdK7DataCacheCfg, + cpumMsrRd_AmdK7BusUnitCfg, + cpumMsrRd_AmdK7DebugCtl2Maybe, + cpumMsrRd_AmdFam15hFpuCfg, + cpumMsrRd_AmdFam15hDecoderCfg, + cpumMsrRd_AmdFam10hBusUnitCfg2, + cpumMsrRd_AmdFam15hCombUnitCfg, + cpumMsrRd_AmdFam15hCombUnitCfg2, + cpumMsrRd_AmdFam15hCombUnitCfg3, + cpumMsrRd_AmdFam15hExecUnitCfg, + cpumMsrRd_AmdFam15hLoadStoreCfg2, + cpumMsrRd_AmdFam10hIbsFetchCtl, + cpumMsrRd_AmdFam10hIbsFetchLinAddr, + cpumMsrRd_AmdFam10hIbsFetchPhysAddr, + cpumMsrRd_AmdFam10hIbsOpExecCtl, + cpumMsrRd_AmdFam10hIbsOpRip, + cpumMsrRd_AmdFam10hIbsOpData, + cpumMsrRd_AmdFam10hIbsOpData2, + cpumMsrRd_AmdFam10hIbsOpData3, + cpumMsrRd_AmdFam10hIbsDcLinAddr, + cpumMsrRd_AmdFam10hIbsDcPhysAddr, + cpumMsrRd_AmdFam10hIbsCtl, + cpumMsrRd_AmdFam14hIbsBrTarget, +}; + + +/** + * MSR write function table. + */ +static const PFNCPUMWRMSR g_aCpumWrMsrFns[kCpumMsrWrFn_End] = +{ + NULL, /* Invalid */ + cpumMsrWr_IgnoreWrite, + cpumMsrWr_ReadOnly, + NULL, /* Alias */ + cpumMsrWr_Ia32P5McAddr, + cpumMsrWr_Ia32P5McType, + cpumMsrWr_Ia32TimestampCounter, + cpumMsrWr_Ia32ApicBase, + cpumMsrWr_Ia32FeatureControl, + cpumMsrWr_Ia32BiosSignId, + cpumMsrWr_Ia32BiosUpdateTrigger, + cpumMsrWr_Ia32SmmMonitorCtl, + cpumMsrWr_Ia32PmcN, + cpumMsrWr_Ia32MonitorFilterLineSize, + cpumMsrWr_Ia32MPerf, + cpumMsrWr_Ia32APerf, + cpumMsrWr_Ia32MtrrPhysBaseN, + cpumMsrWr_Ia32MtrrPhysMaskN, + cpumMsrWr_Ia32MtrrFixed, + cpumMsrWr_Ia32MtrrDefType, + cpumMsrWr_Ia32Pat, + cpumMsrWr_Ia32SysEnterCs, + cpumMsrWr_Ia32SysEnterEsp, + cpumMsrWr_Ia32SysEnterEip, + cpumMsrWr_Ia32McgStatus, + cpumMsrWr_Ia32McgCtl, + cpumMsrWr_Ia32DebugCtl, + cpumMsrWr_Ia32SmrrPhysBase, + cpumMsrWr_Ia32SmrrPhysMask, + cpumMsrWr_Ia32PlatformDcaCap, + cpumMsrWr_Ia32Dca0Cap, + cpumMsrWr_Ia32PerfEvtSelN, + cpumMsrWr_Ia32PerfStatus, + cpumMsrWr_Ia32PerfCtl, + cpumMsrWr_Ia32FixedCtrN, + cpumMsrWr_Ia32PerfCapabilities, + cpumMsrWr_Ia32FixedCtrCtrl, + cpumMsrWr_Ia32PerfGlobalStatus, + cpumMsrWr_Ia32PerfGlobalCtrl, + cpumMsrWr_Ia32PerfGlobalOvfCtrl, + cpumMsrWr_Ia32PebsEnable, + cpumMsrWr_Ia32ClockModulation, + cpumMsrWr_Ia32ThermInterrupt, + cpumMsrWr_Ia32ThermStatus, + cpumMsrWr_Ia32Therm2Ctl, + cpumMsrWr_Ia32MiscEnable, + cpumMsrWr_Ia32McCtlStatusAddrMiscN, + cpumMsrWr_Ia32McNCtl2, + cpumMsrWr_Ia32DsArea, + cpumMsrWr_Ia32TscDeadline, + cpumMsrWr_Ia32X2ApicN, + cpumMsrWr_Ia32DebugInterface, + + cpumMsrWr_Amd64Efer, + cpumMsrWr_Amd64SyscallTarget, + cpumMsrWr_Amd64LongSyscallTarget, + cpumMsrWr_Amd64CompSyscallTarget, + cpumMsrWr_Amd64SyscallFlagMask, + cpumMsrWr_Amd64FsBase, + cpumMsrWr_Amd64GsBase, + cpumMsrWr_Amd64KernelGsBase, + cpumMsrWr_Amd64TscAux, + + cpumMsrWr_IntelEblCrPowerOn, + cpumMsrWr_IntelP4EbcHardPowerOn, + cpumMsrWr_IntelP4EbcSoftPowerOn, + cpumMsrWr_IntelP4EbcFrequencyId, + cpumMsrWr_IntelFlexRatio, + cpumMsrWr_IntelPkgCStConfigControl, + cpumMsrWr_IntelPmgIoCaptureBase, + cpumMsrWr_IntelLastBranchFromToN, + cpumMsrWr_IntelLastBranchFromN, + cpumMsrWr_IntelLastBranchToN, + cpumMsrWr_IntelLastBranchTos, + cpumMsrWr_IntelBblCrCtl, + cpumMsrWr_IntelBblCrCtl3, + cpumMsrWr_IntelI7TemperatureTarget, + cpumMsrWr_IntelI7MsrOffCoreResponseN, + cpumMsrWr_IntelI7MiscPwrMgmt, + cpumMsrWr_IntelP6CrN, + cpumMsrWr_IntelCpuId1FeatureMaskEcdx, + cpumMsrWr_IntelCpuId1FeatureMaskEax, + cpumMsrWr_IntelCpuId80000001FeatureMaskEcdx, + 
cpumMsrWr_IntelI7SandyAesNiCtl, + cpumMsrWr_IntelI7TurboRatioLimit, + cpumMsrWr_IntelI7LbrSelect, + cpumMsrWr_IntelI7SandyErrorControl, + cpumMsrWr_IntelI7PowerCtl, + cpumMsrWr_IntelI7SandyPebsNumAlt, + cpumMsrWr_IntelI7PebsLdLat, + cpumMsrWr_IntelI7SandyVrCurrentConfig, + cpumMsrWr_IntelI7SandyVrMiscConfig, + cpumMsrWr_IntelI7SandyPkgCnIrtlN, + cpumMsrWr_IntelI7RaplPkgPowerLimit, + cpumMsrWr_IntelI7RaplDramPowerLimit, + cpumMsrWr_IntelI7RaplPp0PowerLimit, + cpumMsrWr_IntelI7RaplPp0Policy, + cpumMsrWr_IntelI7RaplPp1PowerLimit, + cpumMsrWr_IntelI7RaplPp1Policy, + cpumMsrWr_IntelI7IvyConfigTdpControl, + cpumMsrWr_IntelI7IvyTurboActivationRatio, + cpumMsrWr_IntelI7UncPerfGlobalCtrl, + cpumMsrWr_IntelI7UncPerfGlobalStatus, + cpumMsrWr_IntelI7UncPerfGlobalOvfCtrl, + cpumMsrWr_IntelI7UncPerfFixedCtrCtrl, + cpumMsrWr_IntelI7UncPerfFixedCtr, + cpumMsrWr_IntelI7UncArbPerfCtrN, + cpumMsrWr_IntelI7UncArbPerfEvtSelN, + cpumMsrWr_IntelCore2EmttmCrTablesN, + cpumMsrWr_IntelCore2SmmCStMiscInfo, + cpumMsrWr_IntelCore1ExtConfig, + cpumMsrWr_IntelCore1DtsCalControl, + cpumMsrWr_IntelCore2PeciControl, + + cpumMsrWr_P6LastIntFromIp, + cpumMsrWr_P6LastIntToIp, + + cpumMsrWr_AmdFam15hTscRate, + cpumMsrWr_AmdFam15hLwpCfg, + cpumMsrWr_AmdFam15hLwpCbAddr, + cpumMsrWr_AmdFam10hMc4MiscN, + cpumMsrWr_AmdK8PerfCtlN, + cpumMsrWr_AmdK8PerfCtrN, + cpumMsrWr_AmdK8SysCfg, + cpumMsrWr_AmdK8HwCr, + cpumMsrWr_AmdK8IorrBaseN, + cpumMsrWr_AmdK8IorrMaskN, + cpumMsrWr_AmdK8TopOfMemN, + cpumMsrWr_AmdK8NbCfg1, + cpumMsrWr_AmdK8McXcptRedir, + cpumMsrWr_AmdK8CpuNameN, + cpumMsrWr_AmdK8HwThermalCtrl, + cpumMsrWr_AmdK8SwThermalCtrl, + cpumMsrWr_AmdK8FidVidControl, + cpumMsrWr_AmdK8McCtlMaskN, + cpumMsrWr_AmdK8SmiOnIoTrapN, + cpumMsrWr_AmdK8SmiOnIoTrapCtlSts, + cpumMsrWr_AmdK8IntPendingMessage, + cpumMsrWr_AmdK8SmiTriggerIoCycle, + cpumMsrWr_AmdFam10hMmioCfgBaseAddr, + cpumMsrWr_AmdFam10hTrapCtlMaybe, + cpumMsrWr_AmdFam10hPStateControl, + cpumMsrWr_AmdFam10hPStateStatus, + cpumMsrWr_AmdFam10hPStateN, + cpumMsrWr_AmdFam10hCofVidControl, + cpumMsrWr_AmdFam10hCofVidStatus, + cpumMsrWr_AmdFam10hCStateIoBaseAddr, + cpumMsrWr_AmdFam10hCpuWatchdogTimer, + cpumMsrWr_AmdK8SmmBase, + cpumMsrWr_AmdK8SmmAddr, + cpumMsrWr_AmdK8SmmMask, + cpumMsrWr_AmdK8VmCr, + cpumMsrWr_AmdK8IgnNe, + cpumMsrWr_AmdK8SmmCtl, + cpumMsrWr_AmdK8VmHSavePa, + cpumMsrWr_AmdFam10hVmLockKey, + cpumMsrWr_AmdFam10hSmmLockKey, + cpumMsrWr_AmdFam10hLocalSmiStatus, + cpumMsrWr_AmdFam10hOsVisWrkIdLength, + cpumMsrWr_AmdFam10hOsVisWrkStatus, + cpumMsrWr_AmdFam16hL2IPerfCtlN, + cpumMsrWr_AmdFam16hL2IPerfCtrN, + cpumMsrWr_AmdFam15hNorthbridgePerfCtlN, + cpumMsrWr_AmdFam15hNorthbridgePerfCtrN, + cpumMsrWr_AmdK7MicrocodeCtl, + cpumMsrWr_AmdK7ClusterIdMaybe, + cpumMsrWr_AmdK8CpuIdCtlStd07hEbax, + cpumMsrWr_AmdK8CpuIdCtlStd06hEcx, + cpumMsrWr_AmdK8CpuIdCtlStd01hEdcx, + cpumMsrWr_AmdK8CpuIdCtlExt01hEdcx, + cpumMsrWr_AmdK8PatchLoader, + cpumMsrWr_AmdK7DebugStatusMaybe, + cpumMsrWr_AmdK7BHTraceBaseMaybe, + cpumMsrWr_AmdK7BHTracePtrMaybe, + cpumMsrWr_AmdK7BHTraceLimitMaybe, + cpumMsrWr_AmdK7HardwareDebugToolCfgMaybe, + cpumMsrWr_AmdK7FastFlushCountMaybe, + cpumMsrWr_AmdK7NodeId, + cpumMsrWr_AmdK7DrXAddrMaskN, + cpumMsrWr_AmdK7Dr0DataMatchMaybe, + cpumMsrWr_AmdK7Dr0DataMaskMaybe, + cpumMsrWr_AmdK7LoadStoreCfg, + cpumMsrWr_AmdK7InstrCacheCfg, + cpumMsrWr_AmdK7DataCacheCfg, + cpumMsrWr_AmdK7BusUnitCfg, + cpumMsrWr_AmdK7DebugCtl2Maybe, + cpumMsrWr_AmdFam15hFpuCfg, + cpumMsrWr_AmdFam15hDecoderCfg, + cpumMsrWr_AmdFam10hBusUnitCfg2, + cpumMsrWr_AmdFam15hCombUnitCfg, + 
cpumMsrWr_AmdFam15hCombUnitCfg2, + cpumMsrWr_AmdFam15hCombUnitCfg3, + cpumMsrWr_AmdFam15hExecUnitCfg, + cpumMsrWr_AmdFam15hLoadStoreCfg2, + cpumMsrWr_AmdFam10hIbsFetchCtl, + cpumMsrWr_AmdFam10hIbsFetchLinAddr, + cpumMsrWr_AmdFam10hIbsFetchPhysAddr, + cpumMsrWr_AmdFam10hIbsOpExecCtl, + cpumMsrWr_AmdFam10hIbsOpRip, + cpumMsrWr_AmdFam10hIbsOpData, + cpumMsrWr_AmdFam10hIbsOpData2, + cpumMsrWr_AmdFam10hIbsOpData3, + cpumMsrWr_AmdFam10hIbsDcLinAddr, + cpumMsrWr_AmdFam10hIbsDcPhysAddr, + cpumMsrWr_AmdFam10hIbsCtl, + cpumMsrWr_AmdFam14hIbsBrTarget, +}; + + +/** + * Looks up the range for the given MSR. + * + * @returns Pointer to the range if found, NULL if not. + * @param pVM The cross context VM structure. + * @param idMsr The MSR to look up. + */ +# ifndef IN_RING3 +static +# endif +PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr) +{ + /* + * Binary lookup. + */ + uint32_t cRanges = pVM->cpum.s.GuestInfo.cMsrRanges; + if (!cRanges) + return NULL; + PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.CTX_SUFF(paMsrRanges); + for (;;) + { + uint32_t i = cRanges / 2; + if (idMsr < paRanges[i].uFirst) + { + if (i == 0) + break; + cRanges = i; + } + else if (idMsr > paRanges[i].uLast) + { + i++; + if (i >= cRanges) + break; + cRanges -= i; + paRanges = &paRanges[i]; + } + else + { + if (paRanges[i].enmRdFn == kCpumMsrRdFn_MsrAlias) + return cpumLookupMsrRange(pVM, paRanges[i].uValue); + return &paRanges[i]; + } + } + +# ifdef VBOX_STRICT + /* + * Linear lookup to verify the above binary search. + */ + uint32_t cLeft = pVM->cpum.s.GuestInfo.cMsrRanges; + PCPUMMSRRANGE pCur = pVM->cpum.s.GuestInfo.CTX_SUFF(paMsrRanges); + while (cLeft-- > 0) + { + if (idMsr >= pCur->uFirst && idMsr <= pCur->uLast) + { + AssertFailed(); + if (pCur->enmRdFn == kCpumMsrRdFn_MsrAlias) + return cpumLookupMsrRange(pVM, pCur->uValue); + return pCur; + } + pCur++; + } +# endif + return NULL; +} + +#ifdef VBOX_WITH_NEW_MSR_CODE + +/** + * Query a guest MSR. + * + * The caller is responsible for checking privilege if the call is the result of + * a RDMSR instruction. We'll do the rest. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is + * expected to take the appropriate actions. @a *puValue is set to 0. + * @param pVCpu Pointer to the VMCPU. + * @param idMsr The MSR. + * @param puValue Where to return the value. + * + * @remarks This will always return the right values, even when we're in the + * recompiler. 
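+ *
+ * @remarks A minimal caller sketch (illustrative only; the surrounding
+ *          emulation context and error handling are assumed, not taken
+ *          from this file):
+ * @code
+ *      uint64_t uValue;
+ *      int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
+ *      if (rc == VERR_CPUM_RAISE_GP_0)
+ *      {
+ *          // uValue is 0 here; the caller is expected to raise #GP(0)
+ *          // in the guest.
+ *      }
+ * @endcode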
+ */ +VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) +{ + *puValue = 0; + + int rc; + PVM pVM = pVCpu->CTX_SUFF(pVM); + PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, idMsr); + if (pRange) + { + CPUMMSRRDFN enmRdFn = (CPUMMSRRDFN)pRange->enmRdFn; + AssertReturn(enmRdFn > kCpumMsrRdFn_Invalid && enmRdFn < kCpumMsrRdFn_End, VERR_CPUM_IPE_1); + + PFNCPUMRDMSR pfnRdMsr = g_aCpumRdMsrFns[enmRdFn]; + AssertReturn(pfnRdMsr, VERR_CPUM_IPE_2); + + STAM_COUNTER_INC(&pRange->cReads); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReads); + + rc = pfnRdMsr(pVCpu, idMsr, pRange, puValue); + if (RT_SUCCESS(rc)) + { + Log2(("CPUM: RDMSR %#x (%s) -> %#llx\n", idMsr, pRange->szName, *puValue)); + AssertMsg(rc == VINF_SUCCESS, ("%Rrc idMsr=%#x\n", rc, idMsr)); + } + else if (rc == VERR_CPUM_RAISE_GP_0) + { + Log(("CPUM: RDMSR %#x (%s) -> #GP(0)\n", idMsr, pRange->szName)); + STAM_COUNTER_INC(&pRange->cGps); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReadsRaiseGp); + } + else + Log(("CPUM: RDMSR %#x (%s) -> rc=%Rrc\n", idMsr, pRange->szName, rc)); + } + else + { + Log(("CPUM: Unknown RDMSR %#x -> #GP(0)\n", idMsr)); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReads); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReadsUnknown); + rc = VERR_CPUM_RAISE_GP_0; + } + return rc; +} + + +/** + * Writes to a guest MSR. + * + * The caller is responsible for checking privilege if the call is the result of + * a WRMSR instruction. We'll do the rest. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the + * appropriate actions. + * + * @param pVCpu Pointer to the VMCPU. + * @param idMsr The MSR id. + * @param uValue The value to set. + * + * @remarks Everyone changing MSR values, including the recompiler, shall do it + * by calling this method. This makes sure we have current values and + * that we trigger all the right actions when something changes. + * + * For performance reasons, this actually isn't entirely true for some + * MSRs when in HM mode. The code here and in HM must be aware of + * this. 
+ */ +VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) +{ + int rc; + PVM pVM = pVCpu->CTX_SUFF(pVM); + PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, idMsr); + if (pRange) + { + STAM_COUNTER_INC(&pRange->cWrites); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWrites); + + if (!(uValue & pRange->fWrGpMask)) + { + CPUMMSRWRFN enmWrFn = (CPUMMSRWRFN)pRange->enmWrFn; + AssertReturn(enmWrFn > kCpumMsrWrFn_Invalid && enmWrFn < kCpumMsrWrFn_End, VERR_CPUM_IPE_1); + + PFNCPUMWRMSR pfnWrMsr = g_aCpumWrMsrFns[enmWrFn]; + AssertReturn(pfnWrMsr, VERR_CPUM_IPE_2); + + uint64_t uValueAdjusted = uValue & ~pRange->fWrIgnMask; + if (uValueAdjusted != uValue) + { + STAM_COUNTER_INC(&pRange->cIgnoredBits); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesToIgnoredBits); + } + + rc = pfnWrMsr(pVCpu, idMsr, pRange, uValueAdjusted, uValue); + if (RT_SUCCESS(rc)) + { + Log2(("CPUM: WRMSR %#x (%s), %#llx [%#llx]\n", idMsr, pRange->szName, uValueAdjusted, uValue)); + AssertMsg(rc == VINF_SUCCESS, ("%Rrc idMsr=%#x\n", rc, idMsr)); + } + else if (rc == VERR_CPUM_RAISE_GP_0) + { + Log(("CPUM: WRMSR %#x (%s), %#llx [%#llx] -> #GP(0)\n", idMsr, pRange->szName, uValueAdjusted, uValue)); + STAM_COUNTER_INC(&pRange->cGps); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesRaiseGp); + } + else + Log(("CPUM: WRMSR %#x (%s), %#llx [%#llx] -> rc=%Rrc\n", idMsr, pRange->szName, uValueAdjusted, uValue, rc)); + } + else + { + Log(("CPUM: WRMSR %#x (%s), %#llx -> #GP(0) - invalid bits %#llx\n", + idMsr, pRange->szName, uValue, uValue & pRange->fWrGpMask)); + STAM_COUNTER_INC(&pRange->cGps); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesRaiseGp); + rc = VERR_CPUM_RAISE_GP_0; + } + } + else + { + Log(("CPUM: Unknown WRMSR %#x, %#llx -> #GP(0)\n", idMsr, uValue)); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWrites); + STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesUnknown); + rc = VERR_CPUM_RAISE_GP_0; + } + return rc; +} + +#endif /* VBOX_WITH_NEW_MSR_CODE */ + + +#if defined(VBOX_STRICT) && defined(IN_RING3) +/** + * Performs some checks on the static data related to MSRs. + * + * @returns VINF_SUCCESS on success, error on failure. 
+ */ +int cpumR3MsrStrictInitChecks(void) +{ +#define CPUM_ASSERT_RD_MSR_FN(a_Register) \ + AssertReturn(g_aCpumRdMsrFns[kCpumMsrRdFn_##a_Register] == cpumMsrRd_##a_Register, VERR_CPUM_IPE_2); +#define CPUM_ASSERT_WR_MSR_FN(a_Register) \ + AssertReturn(g_aCpumWrMsrFns[kCpumMsrWrFn_##a_Register] == cpumMsrWr_##a_Register, VERR_CPUM_IPE_2); + + AssertReturn(g_aCpumRdMsrFns[kCpumMsrRdFn_Invalid] == NULL, VERR_CPUM_IPE_2); + CPUM_ASSERT_RD_MSR_FN(FixedValue); + CPUM_ASSERT_RD_MSR_FN(WriteOnly); + CPUM_ASSERT_RD_MSR_FN(Ia32P5McAddr); + CPUM_ASSERT_RD_MSR_FN(Ia32P5McType); + CPUM_ASSERT_RD_MSR_FN(Ia32TimestampCounter); + CPUM_ASSERT_RD_MSR_FN(Ia32PlatformId); + CPUM_ASSERT_RD_MSR_FN(Ia32ApicBase); + CPUM_ASSERT_RD_MSR_FN(Ia32FeatureControl); + CPUM_ASSERT_RD_MSR_FN(Ia32BiosSignId); + CPUM_ASSERT_RD_MSR_FN(Ia32SmmMonitorCtl); + CPUM_ASSERT_RD_MSR_FN(Ia32PmcN); + CPUM_ASSERT_RD_MSR_FN(Ia32MonitorFilterLineSize); + CPUM_ASSERT_RD_MSR_FN(Ia32MPerf); + CPUM_ASSERT_RD_MSR_FN(Ia32APerf); + CPUM_ASSERT_RD_MSR_FN(Ia32MtrrCap); + CPUM_ASSERT_RD_MSR_FN(Ia32MtrrPhysBaseN); + CPUM_ASSERT_RD_MSR_FN(Ia32MtrrPhysMaskN); + CPUM_ASSERT_RD_MSR_FN(Ia32MtrrFixed); + CPUM_ASSERT_RD_MSR_FN(Ia32MtrrDefType); + CPUM_ASSERT_RD_MSR_FN(Ia32Pat); + CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterCs); + CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterEsp); + CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterEip); + CPUM_ASSERT_RD_MSR_FN(Ia32McgCap); + CPUM_ASSERT_RD_MSR_FN(Ia32McgStatus); + CPUM_ASSERT_RD_MSR_FN(Ia32McgCtl); + CPUM_ASSERT_RD_MSR_FN(Ia32DebugCtl); + CPUM_ASSERT_RD_MSR_FN(Ia32SmrrPhysBase); + CPUM_ASSERT_RD_MSR_FN(Ia32SmrrPhysMask); + CPUM_ASSERT_RD_MSR_FN(Ia32PlatformDcaCap); + CPUM_ASSERT_RD_MSR_FN(Ia32CpuDcaCap); + CPUM_ASSERT_RD_MSR_FN(Ia32Dca0Cap); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfEvtSelN); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfStatus); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfCtl); + CPUM_ASSERT_RD_MSR_FN(Ia32FixedCtrN); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfCapabilities); + CPUM_ASSERT_RD_MSR_FN(Ia32FixedCtrCtrl); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalStatus); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalCtrl); + CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalOvfCtrl); + CPUM_ASSERT_RD_MSR_FN(Ia32PebsEnable); + CPUM_ASSERT_RD_MSR_FN(Ia32ClockModulation); + CPUM_ASSERT_RD_MSR_FN(Ia32ThermInterrupt); + CPUM_ASSERT_RD_MSR_FN(Ia32ThermStatus); + CPUM_ASSERT_RD_MSR_FN(Ia32MiscEnable); + CPUM_ASSERT_RD_MSR_FN(Ia32McCtlStatusAddrMiscN); + CPUM_ASSERT_RD_MSR_FN(Ia32McNCtl2); + CPUM_ASSERT_RD_MSR_FN(Ia32DsArea); + CPUM_ASSERT_RD_MSR_FN(Ia32TscDeadline); + CPUM_ASSERT_RD_MSR_FN(Ia32X2ApicN); + CPUM_ASSERT_RD_MSR_FN(Ia32DebugInterface); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxBase); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxPinbasedCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxProcbasedCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxExitCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxEntryCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxMisc); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr0Fixed0); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr0Fixed1); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr4Fixed0); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr4Fixed1); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxVmcsEnum); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxProcBasedCtls2); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxEptVpidCap); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxTruePinbasedCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueProcbasedCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueExitCtls); + CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueEntryCtls); + + CPUM_ASSERT_RD_MSR_FN(Amd64Efer); + CPUM_ASSERT_RD_MSR_FN(Amd64SyscallTarget); + CPUM_ASSERT_RD_MSR_FN(Amd64LongSyscallTarget); + CPUM_ASSERT_RD_MSR_FN(Amd64CompSyscallTarget); + 
CPUM_ASSERT_RD_MSR_FN(Amd64SyscallFlagMask); + CPUM_ASSERT_RD_MSR_FN(Amd64FsBase); + CPUM_ASSERT_RD_MSR_FN(Amd64GsBase); + CPUM_ASSERT_RD_MSR_FN(Amd64KernelGsBase); + CPUM_ASSERT_RD_MSR_FN(Amd64TscAux); + + CPUM_ASSERT_RD_MSR_FN(IntelEblCrPowerOn); + CPUM_ASSERT_RD_MSR_FN(IntelI7CoreThreadCount); + CPUM_ASSERT_RD_MSR_FN(IntelP4EbcHardPowerOn); + CPUM_ASSERT_RD_MSR_FN(IntelP4EbcSoftPowerOn); + CPUM_ASSERT_RD_MSR_FN(IntelP4EbcFrequencyId); + CPUM_ASSERT_RD_MSR_FN(IntelP6FsbFrequency); + CPUM_ASSERT_RD_MSR_FN(IntelPlatformInfo); + CPUM_ASSERT_RD_MSR_FN(IntelFlexRatio); + CPUM_ASSERT_RD_MSR_FN(IntelPkgCStConfigControl); + CPUM_ASSERT_RD_MSR_FN(IntelPmgIoCaptureBase); + CPUM_ASSERT_RD_MSR_FN(IntelLastBranchFromToN); + CPUM_ASSERT_RD_MSR_FN(IntelLastBranchFromN); + CPUM_ASSERT_RD_MSR_FN(IntelLastBranchToN); + CPUM_ASSERT_RD_MSR_FN(IntelLastBranchTos); + CPUM_ASSERT_RD_MSR_FN(IntelBblCrCtl); + CPUM_ASSERT_RD_MSR_FN(IntelBblCrCtl3); + CPUM_ASSERT_RD_MSR_FN(IntelI7TemperatureTarget); + CPUM_ASSERT_RD_MSR_FN(IntelI7MsrOffCoreResponseN); + CPUM_ASSERT_RD_MSR_FN(IntelI7MiscPwrMgmt); + CPUM_ASSERT_RD_MSR_FN(IntelP6CrN); + CPUM_ASSERT_RD_MSR_FN(IntelCpuId1FeatureMaskEcdx); + CPUM_ASSERT_RD_MSR_FN(IntelCpuId1FeatureMaskEax); + CPUM_ASSERT_RD_MSR_FN(IntelCpuId80000001FeatureMaskEcdx); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyAesNiCtl); + CPUM_ASSERT_RD_MSR_FN(IntelI7TurboRatioLimit); + CPUM_ASSERT_RD_MSR_FN(IntelI7LbrSelect); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyErrorControl); + CPUM_ASSERT_RD_MSR_FN(IntelI7VirtualLegacyWireCap); + CPUM_ASSERT_RD_MSR_FN(IntelI7PowerCtl); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPebsNumAlt); + CPUM_ASSERT_RD_MSR_FN(IntelI7PebsLdLat); + CPUM_ASSERT_RD_MSR_FN(IntelI7PkgCnResidencyN); + CPUM_ASSERT_RD_MSR_FN(IntelI7CoreCnResidencyN); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyVrCurrentConfig); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyVrMiscConfig); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyRaplPowerUnit); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPkgCnIrtlN); + CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPkgC2Residency); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPowerLimit); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgEnergyStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPerfStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPowerInfo); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPowerLimit); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramEnergyStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPerfStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPowerInfo); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0PowerLimit); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0EnergyStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0Policy); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0PerfStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1PowerLimit); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1EnergyStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1Policy); + CPUM_ASSERT_RD_MSR_FN(IntelI7IvyConfigTdpNominal); + CPUM_ASSERT_RD_MSR_FN(IntelI7IvyConfigTdpLevel1); + CPUM_ASSERT_RD_MSR_FN(IntelI7IvyConfigTdpLevel2); + CPUM_ASSERT_RD_MSR_FN(IntelI7IvyConfigTdpControl); + CPUM_ASSERT_RD_MSR_FN(IntelI7IvyTurboActivationRatio); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncPerfGlobalCtrl); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncPerfGlobalStatus); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncPerfGlobalOvfCtrl); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncPerfFixedCtrCtrl); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncPerfFixedCtr); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncCBoxConfig); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncArbPerfCtrN); + CPUM_ASSERT_RD_MSR_FN(IntelI7UncArbPerfEvtSelN); + CPUM_ASSERT_RD_MSR_FN(IntelCore2EmttmCrTablesN); + 
CPUM_ASSERT_RD_MSR_FN(IntelCore2SmmCStMiscInfo); + CPUM_ASSERT_RD_MSR_FN(IntelCore1ExtConfig); + CPUM_ASSERT_RD_MSR_FN(IntelCore1DtsCalControl); + CPUM_ASSERT_RD_MSR_FN(IntelCore2PeciControl); + + CPUM_ASSERT_RD_MSR_FN(P6LastBranchFromIp); + CPUM_ASSERT_RD_MSR_FN(P6LastBranchToIp); + CPUM_ASSERT_RD_MSR_FN(P6LastIntFromIp); + CPUM_ASSERT_RD_MSR_FN(P6LastIntToIp); + + CPUM_ASSERT_RD_MSR_FN(AmdFam15hTscRate); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hLwpCfg); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hLwpCbAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hMc4MiscN); + CPUM_ASSERT_RD_MSR_FN(AmdK8PerfCtlN); + CPUM_ASSERT_RD_MSR_FN(AmdK8PerfCtrN); + CPUM_ASSERT_RD_MSR_FN(AmdK8SysCfg); + CPUM_ASSERT_RD_MSR_FN(AmdK8HwCr); + CPUM_ASSERT_RD_MSR_FN(AmdK8IorrBaseN); + CPUM_ASSERT_RD_MSR_FN(AmdK8IorrMaskN); + CPUM_ASSERT_RD_MSR_FN(AmdK8TopOfMemN); + CPUM_ASSERT_RD_MSR_FN(AmdK8NbCfg1); + CPUM_ASSERT_RD_MSR_FN(AmdK8McXcptRedir); + CPUM_ASSERT_RD_MSR_FN(AmdK8CpuNameN); + CPUM_ASSERT_RD_MSR_FN(AmdK8HwThermalCtrl); + CPUM_ASSERT_RD_MSR_FN(AmdK8SwThermalCtrl); + CPUM_ASSERT_RD_MSR_FN(AmdK8FidVidControl); + CPUM_ASSERT_RD_MSR_FN(AmdK8FidVidStatus); + CPUM_ASSERT_RD_MSR_FN(AmdK8McCtlMaskN); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmiOnIoTrapN); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmiOnIoTrapCtlSts); + CPUM_ASSERT_RD_MSR_FN(AmdK8IntPendingMessage); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmiTriggerIoCycle); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hMmioCfgBaseAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hTrapCtlMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateCurLimit); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateControl); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateStatus); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateN); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hCofVidControl); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hCofVidStatus); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hCStateIoBaseAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hCpuWatchdogTimer); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmmBase); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmmAddr); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmmMask); + CPUM_ASSERT_RD_MSR_FN(AmdK8VmCr); + CPUM_ASSERT_RD_MSR_FN(AmdK8IgnNe); + CPUM_ASSERT_RD_MSR_FN(AmdK8SmmCtl); + CPUM_ASSERT_RD_MSR_FN(AmdK8VmHSavePa); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hVmLockKey); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hSmmLockKey); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hLocalSmiStatus); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hOsVisWrkIdLength); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hOsVisWrkStatus); + CPUM_ASSERT_RD_MSR_FN(AmdFam16hL2IPerfCtlN); + CPUM_ASSERT_RD_MSR_FN(AmdFam16hL2IPerfCtrN); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hNorthbridgePerfCtlN); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hNorthbridgePerfCtrN); + CPUM_ASSERT_RD_MSR_FN(AmdK7MicrocodeCtl); + CPUM_ASSERT_RD_MSR_FN(AmdK7ClusterIdMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd07hEbax); + CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd06hEcx); + CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd01hEdcx); + CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlExt01hEdcx); + CPUM_ASSERT_RD_MSR_FN(AmdK8PatchLevel); + CPUM_ASSERT_RD_MSR_FN(AmdK7DebugStatusMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7BHTraceBaseMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7BHTracePtrMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7BHTraceLimitMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7HardwareDebugToolCfgMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7FastFlushCountMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7NodeId); + CPUM_ASSERT_RD_MSR_FN(AmdK7DrXAddrMaskN); + CPUM_ASSERT_RD_MSR_FN(AmdK7Dr0DataMatchMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7Dr0DataMaskMaybe); + CPUM_ASSERT_RD_MSR_FN(AmdK7LoadStoreCfg); + CPUM_ASSERT_RD_MSR_FN(AmdK7InstrCacheCfg); + CPUM_ASSERT_RD_MSR_FN(AmdK7DataCacheCfg); + CPUM_ASSERT_RD_MSR_FN(AmdK7BusUnitCfg); + 
CPUM_ASSERT_RD_MSR_FN(AmdK7DebugCtl2Maybe); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hFpuCfg); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hDecoderCfg); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hBusUnitCfg2); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg2); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg3); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hExecUnitCfg); + CPUM_ASSERT_RD_MSR_FN(AmdFam15hLoadStoreCfg2); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchCtl); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchLinAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchPhysAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpExecCtl); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpRip); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData2); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData3); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsDcLinAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsDcPhysAddr); + CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsCtl); + CPUM_ASSERT_RD_MSR_FN(AmdFam14hIbsBrTarget); + + AssertReturn(g_aCpumWrMsrFns[kCpumMsrWrFn_Invalid] == NULL, VERR_CPUM_IPE_2); + CPUM_ASSERT_WR_MSR_FN(Ia32P5McAddr); + CPUM_ASSERT_WR_MSR_FN(Ia32P5McType); + CPUM_ASSERT_WR_MSR_FN(Ia32TimestampCounter); + CPUM_ASSERT_WR_MSR_FN(Ia32ApicBase); + CPUM_ASSERT_WR_MSR_FN(Ia32FeatureControl); + CPUM_ASSERT_WR_MSR_FN(Ia32BiosSignId); + CPUM_ASSERT_WR_MSR_FN(Ia32BiosUpdateTrigger); + CPUM_ASSERT_WR_MSR_FN(Ia32SmmMonitorCtl); + CPUM_ASSERT_WR_MSR_FN(Ia32PmcN); + CPUM_ASSERT_WR_MSR_FN(Ia32MonitorFilterLineSize); + CPUM_ASSERT_WR_MSR_FN(Ia32MPerf); + CPUM_ASSERT_WR_MSR_FN(Ia32APerf); + CPUM_ASSERT_WR_MSR_FN(Ia32MtrrPhysBaseN); + CPUM_ASSERT_WR_MSR_FN(Ia32MtrrPhysMaskN); + CPUM_ASSERT_WR_MSR_FN(Ia32MtrrFixed); + CPUM_ASSERT_WR_MSR_FN(Ia32MtrrDefType); + CPUM_ASSERT_WR_MSR_FN(Ia32Pat); + CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterCs); + CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterEsp); + CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterEip); + CPUM_ASSERT_WR_MSR_FN(Ia32McgStatus); + CPUM_ASSERT_WR_MSR_FN(Ia32McgCtl); + CPUM_ASSERT_WR_MSR_FN(Ia32DebugCtl); + CPUM_ASSERT_WR_MSR_FN(Ia32SmrrPhysBase); + CPUM_ASSERT_WR_MSR_FN(Ia32SmrrPhysMask); + CPUM_ASSERT_WR_MSR_FN(Ia32PlatformDcaCap); + CPUM_ASSERT_WR_MSR_FN(Ia32Dca0Cap); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfEvtSelN); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfStatus); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfCtl); + CPUM_ASSERT_WR_MSR_FN(Ia32FixedCtrN); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfCapabilities); + CPUM_ASSERT_WR_MSR_FN(Ia32FixedCtrCtrl); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalStatus); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalCtrl); + CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalOvfCtrl); + CPUM_ASSERT_WR_MSR_FN(Ia32PebsEnable); + CPUM_ASSERT_WR_MSR_FN(Ia32ClockModulation); + CPUM_ASSERT_WR_MSR_FN(Ia32ThermInterrupt); + CPUM_ASSERT_WR_MSR_FN(Ia32ThermStatus); + CPUM_ASSERT_WR_MSR_FN(Ia32MiscEnable); + CPUM_ASSERT_WR_MSR_FN(Ia32McCtlStatusAddrMiscN); + CPUM_ASSERT_WR_MSR_FN(Ia32McNCtl2); + CPUM_ASSERT_WR_MSR_FN(Ia32DsArea); + CPUM_ASSERT_WR_MSR_FN(Ia32TscDeadline); + CPUM_ASSERT_WR_MSR_FN(Ia32X2ApicN); + CPUM_ASSERT_WR_MSR_FN(Ia32DebugInterface); + + CPUM_ASSERT_WR_MSR_FN(Amd64Efer); + CPUM_ASSERT_WR_MSR_FN(Amd64SyscallTarget); + CPUM_ASSERT_WR_MSR_FN(Amd64LongSyscallTarget); + CPUM_ASSERT_WR_MSR_FN(Amd64CompSyscallTarget); + CPUM_ASSERT_WR_MSR_FN(Amd64SyscallFlagMask); + CPUM_ASSERT_WR_MSR_FN(Amd64FsBase); + CPUM_ASSERT_WR_MSR_FN(Amd64GsBase); + CPUM_ASSERT_WR_MSR_FN(Amd64KernelGsBase); + CPUM_ASSERT_WR_MSR_FN(Amd64TscAux); + + CPUM_ASSERT_WR_MSR_FN(IntelEblCrPowerOn); + CPUM_ASSERT_WR_MSR_FN(IntelP4EbcHardPowerOn); + 
CPUM_ASSERT_WR_MSR_FN(IntelP4EbcSoftPowerOn); + CPUM_ASSERT_WR_MSR_FN(IntelP4EbcFrequencyId); + CPUM_ASSERT_WR_MSR_FN(IntelFlexRatio); + CPUM_ASSERT_WR_MSR_FN(IntelPkgCStConfigControl); + CPUM_ASSERT_WR_MSR_FN(IntelPmgIoCaptureBase); + CPUM_ASSERT_WR_MSR_FN(IntelLastBranchFromToN); + CPUM_ASSERT_WR_MSR_FN(IntelLastBranchFromN); + CPUM_ASSERT_WR_MSR_FN(IntelLastBranchToN); + CPUM_ASSERT_WR_MSR_FN(IntelLastBranchTos); + CPUM_ASSERT_WR_MSR_FN(IntelBblCrCtl); + CPUM_ASSERT_WR_MSR_FN(IntelBblCrCtl3); + CPUM_ASSERT_WR_MSR_FN(IntelI7TemperatureTarget); + CPUM_ASSERT_WR_MSR_FN(IntelI7MsrOffCoreResponseN); + CPUM_ASSERT_WR_MSR_FN(IntelI7MiscPwrMgmt); + CPUM_ASSERT_WR_MSR_FN(IntelP6CrN); + CPUM_ASSERT_WR_MSR_FN(IntelCpuId1FeatureMaskEcdx); + CPUM_ASSERT_WR_MSR_FN(IntelCpuId1FeatureMaskEax); + CPUM_ASSERT_WR_MSR_FN(IntelCpuId80000001FeatureMaskEcdx); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyAesNiCtl); + CPUM_ASSERT_WR_MSR_FN(IntelI7TurboRatioLimit); + CPUM_ASSERT_WR_MSR_FN(IntelI7LbrSelect); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyErrorControl); + CPUM_ASSERT_WR_MSR_FN(IntelI7PowerCtl); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyPebsNumAlt); + CPUM_ASSERT_WR_MSR_FN(IntelI7PebsLdLat); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyVrCurrentConfig); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyVrMiscConfig); + CPUM_ASSERT_WR_MSR_FN(IntelI7SandyPkgCnIrtlN); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPkgPowerLimit); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplDramPowerLimit); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp0PowerLimit); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp0Policy); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp1PowerLimit); + CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp1Policy); + CPUM_ASSERT_WR_MSR_FN(IntelI7IvyConfigTdpControl); + CPUM_ASSERT_WR_MSR_FN(IntelI7IvyTurboActivationRatio); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncPerfGlobalCtrl); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncPerfGlobalStatus); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncPerfGlobalOvfCtrl); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncPerfFixedCtrCtrl); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncPerfFixedCtr); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncArbPerfCtrN); + CPUM_ASSERT_WR_MSR_FN(IntelI7UncArbPerfEvtSelN); + CPUM_ASSERT_WR_MSR_FN(IntelCore2EmttmCrTablesN); + CPUM_ASSERT_WR_MSR_FN(IntelCore2SmmCStMiscInfo); + CPUM_ASSERT_WR_MSR_FN(IntelCore1ExtConfig); + CPUM_ASSERT_WR_MSR_FN(IntelCore1DtsCalControl); + CPUM_ASSERT_WR_MSR_FN(IntelCore2PeciControl); + + CPUM_ASSERT_WR_MSR_FN(P6LastIntFromIp); + CPUM_ASSERT_WR_MSR_FN(P6LastIntToIp); + + CPUM_ASSERT_WR_MSR_FN(AmdFam15hTscRate); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hLwpCfg); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hLwpCbAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hMc4MiscN); + CPUM_ASSERT_WR_MSR_FN(AmdK8PerfCtlN); + CPUM_ASSERT_WR_MSR_FN(AmdK8PerfCtrN); + CPUM_ASSERT_WR_MSR_FN(AmdK8SysCfg); + CPUM_ASSERT_WR_MSR_FN(AmdK8HwCr); + CPUM_ASSERT_WR_MSR_FN(AmdK8IorrBaseN); + CPUM_ASSERT_WR_MSR_FN(AmdK8IorrMaskN); + CPUM_ASSERT_WR_MSR_FN(AmdK8TopOfMemN); + CPUM_ASSERT_WR_MSR_FN(AmdK8NbCfg1); + CPUM_ASSERT_WR_MSR_FN(AmdK8McXcptRedir); + CPUM_ASSERT_WR_MSR_FN(AmdK8CpuNameN); + CPUM_ASSERT_WR_MSR_FN(AmdK8HwThermalCtrl); + CPUM_ASSERT_WR_MSR_FN(AmdK8SwThermalCtrl); + CPUM_ASSERT_WR_MSR_FN(AmdK8FidVidControl); + CPUM_ASSERT_WR_MSR_FN(AmdK8McCtlMaskN); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmiOnIoTrapN); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmiOnIoTrapCtlSts); + CPUM_ASSERT_WR_MSR_FN(AmdK8IntPendingMessage); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmiTriggerIoCycle); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hMmioCfgBaseAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hTrapCtlMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateControl); + 
CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateStatus); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateN); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hCofVidControl); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hCofVidStatus); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hCStateIoBaseAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hCpuWatchdogTimer); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmmBase); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmmAddr); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmmMask); + CPUM_ASSERT_WR_MSR_FN(AmdK8VmCr); + CPUM_ASSERT_WR_MSR_FN(AmdK8IgnNe); + CPUM_ASSERT_WR_MSR_FN(AmdK8SmmCtl); + CPUM_ASSERT_WR_MSR_FN(AmdK8VmHSavePa); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hVmLockKey); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hSmmLockKey); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hLocalSmiStatus); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hOsVisWrkIdLength); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hOsVisWrkStatus); + CPUM_ASSERT_WR_MSR_FN(AmdFam16hL2IPerfCtlN); + CPUM_ASSERT_WR_MSR_FN(AmdFam16hL2IPerfCtrN); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hNorthbridgePerfCtlN); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hNorthbridgePerfCtrN); + CPUM_ASSERT_WR_MSR_FN(AmdK7MicrocodeCtl); + CPUM_ASSERT_WR_MSR_FN(AmdK7ClusterIdMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd07hEbax); + CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd06hEcx); + CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd01hEdcx); + CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlExt01hEdcx); + CPUM_ASSERT_WR_MSR_FN(AmdK8PatchLoader); + CPUM_ASSERT_WR_MSR_FN(AmdK7DebugStatusMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7BHTraceBaseMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7BHTracePtrMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7BHTraceLimitMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7HardwareDebugToolCfgMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7FastFlushCountMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7NodeId); + CPUM_ASSERT_WR_MSR_FN(AmdK7DrXAddrMaskN); + CPUM_ASSERT_WR_MSR_FN(AmdK7Dr0DataMatchMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7Dr0DataMaskMaybe); + CPUM_ASSERT_WR_MSR_FN(AmdK7LoadStoreCfg); + CPUM_ASSERT_WR_MSR_FN(AmdK7InstrCacheCfg); + CPUM_ASSERT_WR_MSR_FN(AmdK7DataCacheCfg); + CPUM_ASSERT_WR_MSR_FN(AmdK7BusUnitCfg); + CPUM_ASSERT_WR_MSR_FN(AmdK7DebugCtl2Maybe); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hFpuCfg); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hDecoderCfg); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hBusUnitCfg2); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg2); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg3); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hExecUnitCfg); + CPUM_ASSERT_WR_MSR_FN(AmdFam15hLoadStoreCfg2); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchCtl); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchLinAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchPhysAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpExecCtl); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpRip); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData2); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData3); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsDcLinAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsDcPhysAddr); + CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsCtl); + CPUM_ASSERT_WR_MSR_FN(AmdFam14hIbsBrTarget); + + return VINF_SUCCESS; +} +#endif /* VBOX_STRICT && IN_RING3 */ + + +/** + * Gets the scalable bus frequency. + * + * The bus frequency is used as a base in several MSRs that gives the CPU and + * other frequency ratios. + * + * @returns Scalable bus frequency in Hz. Will not return CPUM_SBUSFREQ_UNKNOWN. + * @param pVM Pointer to the shared VM structure. 
+ */ +VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM) +{ + uint64_t uFreq = pVM->cpum.s.GuestInfo.uScalableBusFreq; + if (uFreq == CPUM_SBUSFREQ_UNKNOWN) + uFreq = CPUM_SBUSFREQ_100MHZ; + return uFreq; +} + + +#ifdef IN_RING0 + +/** + * Fast way for HM to access the MSR_K8_TSC_AUX register. + * + * @returns The register value. + * @param pVCpu Pointer to the cross context CPU structure for + * the calling EMT. + * @thread EMT(pVCpu) + */ +VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestTscAux(PVMCPU pVCpu) +{ + return pVCpu->cpum.s.GuestMsrs.msr.TscAux; +} + + +/** + * Fast way for HM to access the MSR_K8_TSC_AUX register. + * + * @param pVCpu Pointer to the cross context CPU structure for + * the calling EMT. + * @param uValue The new value. + * @thread EMT(pVCpu) + */ +VMMR0_INT_DECL(void) CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue) +{ + pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue; +} + +#endif /* IN_RING0 */ diff --git a/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp b/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp index de80e4a6..7a0b4c5b 100644 --- a/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp +++ b/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -26,6 +26,7 @@ #include <VBox/vmm/pdm.h> #include <VBox/vmm/pgm.h> #include <VBox/vmm/mm.h> +#include <VBox/vmm/em.h> #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0) # include <VBox/vmm/selm.h> #endif @@ -34,7 +35,7 @@ #include <VBox/err.h> #include <VBox/dis.h> #include <VBox/log.h> -#include <VBox/vmm/hwaccm.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/tm.h> #include <iprt/assert.h> #include <iprt/asm.h> @@ -88,7 +89,7 @@ static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg) { Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); - Assert(!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))); + Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM))); Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT); if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM) @@ -315,45 +316,84 @@ VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR) } +/** @MAYBE_LOAD_DRx + * Macro for updating DRx values in raw-mode and ring-0 contexts. + */ +#ifdef IN_RING0 +# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) +# ifndef VBOX_WITH_HYBRID_32BIT_KERNEL +# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \ + do { \ + if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \ + a_fnLoad(a_uValue); \ + else \ + (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \ + } while (0) +# else +# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \ + do { \ + /** @todo we're not loading the correct guest value here! */ \ + a_fnLoad(a_uValue); \ + } while (0) +# endif +# else +# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \ + do { \ + a_fnLoad(a_uValue); \ + } while (0) +# endif + +#elif defined(IN_RC) +# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \ + do { \ + if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \ + { a_fnLoad(a_uValue); } \ + } while (0) + +#else +# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0) +#endif + VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0) { pVCpu->cpum.s.Hyper.dr[0] = uDr0; - /** @todo in GC we must load it! 
*/ + MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0); } VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1) { pVCpu->cpum.s.Hyper.dr[1] = uDr1; - /** @todo in GC we must load it! */ + MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1); } VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2) { pVCpu->cpum.s.Hyper.dr[2] = uDr2; - /** @todo in GC we must load it! */ + MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2); } VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3) { pVCpu->cpum.s.Hyper.dr[3] = uDr3; - /** @todo in GC we must load it! */ + MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3); } VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6) { pVCpu->cpum.s.Hyper.dr[6] = uDr6; - /** @todo in GC we must load it! */ } VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7) { pVCpu->cpum.s.Hyper.dr[7] = uDr7; - /** @todo in GC we must load it! */ +#ifdef IN_RC + MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7); +#endif } @@ -544,7 +584,7 @@ VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit { #ifdef VBOX_WITH_IEM # ifdef VBOX_WITH_RAW_MODE_NOT_R0 - if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM))) VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT); # endif #endif @@ -558,7 +598,7 @@ VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit { #ifdef VBOX_WITH_IEM # ifdef VBOX_WITH_RAW_MODE_NOT_R0 - if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM))) VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT); # endif #endif @@ -572,7 +612,7 @@ VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr) { #ifdef VBOX_WITH_IEM # ifdef VBOX_WITH_RAW_MODE_NOT_R0 - if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM))) VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); # endif #endif @@ -587,7 +627,7 @@ VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr) # ifdef VBOX_WITH_RAW_MODE_NOT_R0 if ( ( ldtr != 0 || pVCpu->cpum.s.Guest.ldtr.Sel != 0) - && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + && !HMIsEnabled(pVCpu->CTX_SUFF(pVM))) VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT); # endif #endif @@ -634,7 +674,7 @@ VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0) AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0)); HyperCR0 &= ~X86_CR0_EM; HyperCR0 |= cr0 & X86_CR0_EM; - Log(("CPUM New HyperCR0=%#x\n", HyperCR0)); + Log(("CPUM: New HyperCR0=%#x\n", HyperCR0)); ASMSetCR0(HyperCR0); } # ifdef VBOX_STRICT @@ -658,7 +698,7 @@ VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0) ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0)); HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP); HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP); - Log(("CPUM New HyperCR0=%#x\n", HyperCR0)); + Log(("CPUM: New HyperCR0=%#x\n", HyperCR0)); ASMSetCR0(HyperCR0); } } @@ -673,6 +713,12 @@ VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0) pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH; pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0; + /* + * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack) + */ + if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP)) + PGMCr0WpEnabled(pVCpu); + pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET; return VINF_SUCCESS; } @@ -823,24 +869,18 @@ VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val) pVCpu->cpum.s.Guest.msrEFER = val; } +#ifndef VBOX_WITH_NEW_MSR_CODE /** - * Query an MSR. 
- * - * The caller is responsible for checking privilege if the call is the result - * of a RDMSR instruction. We'll do the rest. - * - * @retval VINF_SUCCESS on success. - * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is - * expected to take the appropriate actions. @a *puValue is set to 0. - * @param pVCpu Pointer to the VMCPU. - * @param idMsr The MSR. - * @param puValue Where to return the value. + * Worker for CPUMQueryGuestMsr(). * - * @remarks This will always return the right values, even when we're in the - * recompiler. + * @retval VINF_SUCCESS + * @retval VERR_CPUM_RAISE_GP_0 + * @param pVCpu The cross context CPU structure. + * @param idMsr The MSR to read. + * @param puValue Where to store the return value. */ -VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) +static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) { /* * If we don't indicate MSR support in the CPUID feature bits, indicate @@ -861,15 +901,18 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) break; case MSR_IA32_APICBASE: - rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue); - if (RT_SUCCESS(rc)) - rc = VINF_SUCCESS; + { + /* See @bugref{7097} comment 6. */ + PVM pVM = pVCpu->CTX_SUFF(pVM); + if (PDMHasApic(pVM)) + *puValue = pVCpu->cpum.s.Guest.msrApicBase; else { - *puValue = 0; rc = VERR_CPUM_RAISE_GP_0; + *puValue = 0; } break; + } case MSR_IA32_CR_PAT: *puValue = pVCpu->cpum.s.Guest.msrPAT; @@ -901,6 +944,24 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) break; } + case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0: + case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1: + case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2: + case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3: + case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4: + case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5: + case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6: + case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7: + /** @todo implement variable MTRRs. */ + *puValue = 0; + break; +#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */ + case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8: + case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9: + *puValue = 0; + break; +#endif + case MSR_IA32_MTRR_DEF_TYPE: *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType; break; @@ -976,7 +1037,9 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) break; case MSR_IA32_PERF_STATUS: - /** @todo could really be not exactly correct, maybe use host's values */ + /** @todo could really be not exactly correct, maybe use host's values + * Apple code indicates that we should use CPU Hz / 1.333MHz here. */ + /** @todo Where are the specs implemented here found? 
*/ + *puValue = UINT64_C(1000) /* TSC increment by tick */ | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */ | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */; @@ -995,7 +1058,7 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) break; case MSR_IA32_PLATFORM_INFO: - *puValue = (u8Multiplier << 8) /* Flex ratio max */ + *puValue = ((uint32_t)u8Multiplier << 8) /* Flex ratio max */ | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */; break; @@ -1020,6 +1083,11 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) #endif break; + /** @todo virtualize DEBUGCTL and relatives */ + case MSR_IA32_DEBUGCTL: + *puValue = 0; + break; + #if 0 /*def IN_RING0 */ case MSR_IA32_PLATFORM_ID: case MSR_IA32_BIOS_SIGN_ID: @@ -1034,32 +1102,138 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) } /* no break */ #endif + /* + * The BIOS_SIGN_ID MSR and MSR_IA32_MCP_CAP et al exist on AMD64 as + * well, at least Bulldozer has them. Windows 7 is querying them. + * XP has been observed querying MSR_IA32_MC0_CTL. + * XP64 has been observed querying MSR_P4_LASTBRANCH_0 (also on AMD). + */ + case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */ + case MSR_IA32_MCG_CAP: /* fam/mod >= 6_01 */ + case MSR_IA32_MCG_STATUS: /* indicated as not present in CAP */ + /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */ + case MSR_IA32_MC0_CTL: + case MSR_IA32_MC0_STATUS: + case MSR_P4_LASTBRANCH_0: + case MSR_P4_LASTBRANCH_1: + case MSR_P4_LASTBRANCH_2: + case MSR_P4_LASTBRANCH_3: + case 0x2c: /* accessed by some Intel driver but also read on AMD systems */ + *puValue = 0; + break; + /* * Intel specific MSRs: */ + case MSR_P5_MC_ADDR: + case MSR_P5_MC_TYPE: + case MSR_P4_LASTBRANCH_TOS: /** @todo Are these branch regs still here on more recent CPUs? The documentation doesn't mention them for several archs. */ + case MSR_IA32_PERFEVTSEL0: /* NetWare 6.5 wants these four. (Bet on AMD as well.) */ + case MSR_IA32_PERFEVTSEL1: + case MSR_IA32_PMC0: + case MSR_IA32_PMC1: case MSR_IA32_PLATFORM_ID: /* fam/mod >= 6_01 */ - case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */ + case MSR_IA32_MPERF: /* intel_pstate depends on this but does a validation test */ + case MSR_IA32_APERF: /* intel_pstate depends on this but does a validation test */ /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */ - case MSR_IA32_MCP_CAP: /* fam/mod >= 6_01 */ - /*case MSR_IA32_MCP_STATUS: - indicated as not present in CAP */ - /*case MSR_IA32_MCP_CTRL: - indicated as not present in CAP */ - case MSR_IA32_MC0_CTL: - case MSR_IA32_MC0_STATUS: + case MSR_RAPL_POWER_UNIT: + case MSR_BBL_CR_CTL3: /* ca. core arch? */ + case MSR_PKG_CST_CONFIG_CONTROL: /* Nehalem, Sandy Bridge */ + case MSR_CORE_THREAD_COUNT: /* Apple queries this. */ + case MSR_FLEX_RATIO: /* Apple queries this. */ + case 0x1ad: /* MSR_TURBO_POWER_CURRENT_LIMIT */ *puValue = 0; if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL) { - Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr)); + Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr)); rc = VERR_CPUM_RAISE_GP_0; + break; + } + + /* Provide more plausible values for some of them. 
*/ + switch (idMsr) + { + case MSR_RAPL_POWER_UNIT: + *puValue = RT_MAKE_U32_FROM_U8(3 /* power units (1/8 W)*/, + 16 /* 15.3 micro-Joules */, + 10 /* 976 microseconds increments */, + 0); + break; + case MSR_BBL_CR_CTL3: + *puValue = RT_MAKE_U32_FROM_U8(1, /* bit 0 - L2 Hardware Enabled. (RO) */ + 1, /* bit 8 - L2 Enabled (R/W). */ + 0, /* bit 23 - L2 Not Present (RO). */ + 0); + break; + case MSR_PKG_CST_CONFIG_CONTROL: + *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl; + break; + case MSR_CORE_THREAD_COUNT: + { + /** @todo restrict this to nehalem. */ + PVM pVM = pVCpu->CTX_SUFF(pVM); /* Note! Not sweating the 4-bit core count limit on westmere. */ + *puValue = (pVM->cCpus & 0xffff) | ((pVM->cCpus & 0xffff) << 16); + break; + } + + case MSR_FLEX_RATIO: + { + /** @todo Check for P4, it's different there. Try to find accurate specs. */ + *puValue = (uint32_t)u8Multiplier << 8; + break; + } + } + break; + +#if 0 /* Only on pentium CPUs! */ + /* Event counters, not supported. */ + case MSR_IA32_CESR: + case MSR_IA32_CTR0: + case MSR_IA32_CTR1: + *puValue = 0; + break; +#endif + + + /* + * AMD specific MSRs: + */ + case MSR_K8_SYSCFG: + case MSR_K8_INT_PENDING: + case MSR_K8_NB_CFG: /* (All known values are 0 on reset.) */ + case MSR_K8_HWCR: /* Very interesting bits here. :) */ + case MSR_K8_VM_CR: /* Windows 8 */ + case 0xc0011029: /* quick fix for FreeBSD 9.1. */ + case 0xc0010042: /* quick fix for something. */ + case 0xc001102a: /* quick fix for w2k8 + opposition. */ + case 0xc0011004: /* quick fix for the opposition. */ + case 0xc0011005: /* quick fix for the opposition. */ + case 0xc0011023: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR0: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR1: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR2: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR3: /* quick fix for the opposition. */ + *puValue = 0; + if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD) + { + Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + /* ignored */ break; default: /* * Hand the X2APIC range to PDM and the APIC. */ - if ( idMsr >= MSR_IA32_APIC_START - && idMsr < MSR_IA32_APIC_END) + if ( idMsr >= MSR_IA32_X2APIC_START + && idMsr <= MSR_IA32_X2APIC_END) { rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue); if (RT_SUCCESS(rc)) @@ -1083,6 +1257,30 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) /** + * Query an MSR. + * + * The caller is responsible for checking privilege if the call is the result + * of a RDMSR instruction. We'll do the rest. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is + * expected to take the appropriate actions. @a *puValue is set to 0. + * @param pVCpu Pointer to the VMCPU. + * @param idMsr The MSR. + * @param puValue Where to return the value. + * + * @remarks This will always return the right values, even when we're in the + * recompiler. 
+ */ +VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) +{ + int rc = cpumQueryGuestMsrInt(pVCpu, idMsr, puValue); + LogFlow(("CPUMQueryGuestMsr: %#x -> %llx rc=%d\n", idMsr, *puValue, rc)); + return rc; +} + + +/** * Sets the MSR. * * The caller is responsible for checking privilege if the call is the result @@ -1102,6 +1300,8 @@ VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue) */ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) { + LogFlow(("CPUMSetGuestMsr: %#x <- %#llx\n", idMsr, uValue)); + /* * If we don't indicate MSR support in the CPUID feature bits, indicate * that a #GP(0) should be raised. @@ -1121,7 +1321,7 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) break; case MSR_IA32_APICBASE: - rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue); + rc = PDMApicSetBase(pVCpu, uValue); if (rc != VINF_SUCCESS) rc = VERR_CPUM_RAISE_GP_0; break; @@ -1153,12 +1353,28 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) && (uValue & 0xff) != 5 && (uValue & 0xff) != 6) ) { - Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue)); + Log(("CPUM: MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue)); return VERR_CPUM_RAISE_GP_0; } pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue; break; + case IA32_MTRR_PHYSBASE0: case IA32_MTRR_PHYSMASK0: + case IA32_MTRR_PHYSBASE1: case IA32_MTRR_PHYSMASK1: + case IA32_MTRR_PHYSBASE2: case IA32_MTRR_PHYSMASK2: + case IA32_MTRR_PHYSBASE3: case IA32_MTRR_PHYSMASK3: + case IA32_MTRR_PHYSBASE4: case IA32_MTRR_PHYSMASK4: + case IA32_MTRR_PHYSBASE5: case IA32_MTRR_PHYSMASK5: + case IA32_MTRR_PHYSBASE6: case IA32_MTRR_PHYSMASK6: + case IA32_MTRR_PHYSBASE7: case IA32_MTRR_PHYSMASK7: + /** @todo implement variable MTRRs. */ + break; +#if 0 /** @todo newer CPUs have more, figure since when and do selective GP(). */ + case IA32_MTRR_PHYSBASE8: case IA32_MTRR_PHYSMASK8: + case IA32_MTRR_PHYSBASE9: case IA32_MTRR_PHYSMASK9: + break; +#endif + case IA32_MTRR_FIX64K_00000: pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue; break; @@ -1220,7 +1436,7 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) if ( (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME) && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)) { - Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n")); + Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n")); return VERR_CPUM_RAISE_GP_0; } @@ -1235,7 +1451,7 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))) { /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/); - HWACCMFlushTLB(pVCpu); + HMFlushTLB(pVCpu); /* Notify PGM about NXE changes. 
*/ if ( (uOldEFER & MSR_K6_EFER_NXE) @@ -1277,6 +1493,10 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue; break; + case MSR_IA32_DEBUGCTL: + /** @todo virtualize DEBUGCTL and relatives */ + break; + /* * Intel specific MSRs: */ @@ -1284,24 +1504,75 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) case MSR_IA32_BIOS_SIGN_ID: /* fam/mod >= 6_01 */ case MSR_IA32_BIOS_UPDT_TRIG: /* fam/mod >= 6_01 */ /*case MSR_IA32_MCP_CAP: - read-only */ - /*case MSR_IA32_MCP_STATUS: - read-only */ - /*case MSR_IA32_MCP_CTRL: - indicated as not present in CAP */ + /*case MSR_IA32_MCG_STATUS: - read-only */ + /*case MSR_IA32_MCG_CTRL: - indicated as not present in CAP */ /*case MSR_IA32_MC0_CTL: - read-only? */ /*case MSR_IA32_MC0_STATUS: - read-only? */ + case MSR_PKG_CST_CONFIG_CONTROL: if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL) { - Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr)); + Log(("CPUM: MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr)); + return VERR_CPUM_RAISE_GP_0; + } + + switch (idMsr) + { + case MSR_PKG_CST_CONFIG_CONTROL: + { + if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15)) + { + Log(("MSR_PKG_CST_CONFIG_CONTROL: Write protected -> #GP\n")); + return VERR_CPUM_RAISE_GP_0; + } + static uint64_t s_fMask = UINT64_C(0x01f08407); /** @todo Only Nehalem has 24; Only Sandy has 27 and 28. */ + static uint64_t s_fGpInvalid = UINT64_C(0xffffffff00ff0000); /** @todo figure out exactly what's off limits. */ + if ((uValue & s_fGpInvalid) || (uValue & 7) >= 5) + { + Log(("MSR_PKG_CST_CONFIG_CONTROL: Invalid value %#llx -> #GP\n", uValue)); + return VERR_CPUM_RAISE_GP_0; + } + pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue & s_fMask; + break; + } + + } + /* ignored */ + break; + + /* + * AMD specific MSRs: + */ + case MSR_K8_SYSCFG: /** @todo can be written, but we ignore that for now. */ + case MSR_K8_INT_PENDING: /** @todo can be written, but we ignore that for now. */ + case MSR_K8_NB_CFG: /** @todo can be written; the apicid swapping might be used and would need saving, but probably unnecessary. */ + case 0xc0011029: /* quick fix for FreeBSD 9.1. */ + case 0xc0010042: /* quick fix for something. */ + case 0xc001102a: /* quick fix for w2k8 + opposition. */ + case 0xc0011004: /* quick fix for the opposition. */ + case 0xc0011005: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL0: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL1: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL2: /* quick fix for the opposition. */ + case MSR_K7_EVNTSEL3: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR0: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR1: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR2: /* quick fix for the opposition. */ + case MSR_K7_PERFCTR3: /* quick fix for the opposition. */ + if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_AMD) + { + Log(("CPUM: MSR %#x is AMD, the virtual CPU isn't an AMD one -> #GP\n", idMsr)); return VERR_CPUM_RAISE_GP_0; } /* ignored */ break; + default: /* * Hand the X2APIC range to PDM and the APIC. 
*/ - if ( idMsr >= MSR_IA32_APIC_START - && idMsr < MSR_IA32_APIC_END) + if ( idMsr >= MSR_IA32_X2APIC_START + && idMsr <= MSR_IA32_X2APIC_END) { rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue); if (rc != VINF_SUCCESS) @@ -1318,6 +1589,8 @@ VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue) return rc; } +#endif /* !VBOX_WITH_NEW_MSR_CODE */ + VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit) { @@ -1514,7 +1787,7 @@ VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue) case DISCREG_CR8: { uint8_t u8Tpr; - int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/); + int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */); if (RT_FAILURE(rc)) { AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc)); @@ -1586,6 +1859,66 @@ VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu) /** + * Looks up a CPUID leaf in the CPUID leaf array. + * + * @returns Pointer to the leaf if found, NULL if not. + * + * @param pVM Pointer to the cross context VM structure. + * @param uLeaf The leaf to get. + * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it + * isn't. + */ +PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf) +{ + unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves; + if (iEnd) + { + unsigned iStart = 0; + PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves); + for (;;) + { + unsigned i = iStart + (iEnd - iStart) / 2U; + if (uLeaf < paLeaves[i].uLeaf) + { + if (i <= iStart) + return NULL; + iEnd = i; + } + else if (uLeaf > paLeaves[i].uLeaf) + { + i += 1; + if (i >= iEnd) + return NULL; + iStart = i; + } + else + { + uSubLeaf &= paLeaves[i].fSubLeafMask; + if (uSubLeaf != paLeaves[i].uSubLeaf) + { + /* Find the right subleaf. We return the last one before + uSubLeaf if we don't find an exact match. */ + if (uSubLeaf < paLeaves[i].uSubLeaf) + while ( i > 0 + && uLeaf == paLeaves[i].uLeaf + && uSubLeaf < paLeaves[i].uSubLeaf) + i--; + else + while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves + && uLeaf == paLeaves[i + 1].uLeaf + && uSubLeaf >= paLeaves[i + 1].uSubLeaf) + i++; + } + return &paLeaves[i]; + } + } + } + + return NULL; +} + + +/** * Gets a CPUID leaf. * * @param pVCpu Pointer to the VMCPU. @@ -1628,7 +1961,7 @@ VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, ui if ( iLeaf == 4 && cCurrentCacheIndex < 3 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL) + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) { uint32_t type, level, sharing, linesize, partitions, associativity, sets, cores; @@ -1729,27 +2062,36 @@ VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM) */ VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) { + PCPUMCPUIDLEAF pLeaf; + switch (enmFeature) { /* * Set the APIC bit in both feature masks. 
*/ case CPUMCPUIDFEATURE_APIC: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC; + + pVM->cpum.s.GuestFeatures.fApic = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n")); break; /* * Set the x2APIC bit in the standard feature mask. */ case CPUMCPUIDFEATURE_X2APIC: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC; + pVM->cpum.s.GuestFeatures.fX2Apic = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n")); break; /* @@ -1757,170 +2099,184 @@ VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) * Assumes the caller knows what it's doing! (host must support these) */ case CPUMCPUIDFEATURE_SEP: - { - if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP)) + if (!pVM->cpum.s.HostFeatures.fSysEnter) { AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n")); return; } - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP; + pVM->cpum.s.GuestFeatures.fSysEnter = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n")); break; - } /* * Set the syscall/sysret bit in the extended feature mask. * Assumes the caller knows what it's doing! (host must support these) */ case CPUMCPUIDFEATURE_SYSCALL: - { - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)) + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( !pLeaf + || !pVM->cpum.s.HostFeatures.fSysCall) { #if HC_ARCH_BITS == 32 - /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode. - * Even when the cpu is capable of doing so in 64 bits mode. - */ - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) - || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)) + /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32-bit + mode by Intel, even when the cpu is capable of doing so in + 64-bit mode. Long mode requires syscall support. */ + if (!pVM->cpum.s.HostFeatures.fLongMode) #endif { - LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n")); return; } } + /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. 
*/ - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n")); + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; + pVM->cpum.s.GuestFeatures.fSysCall = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n")); break; - } /* * Set the PAE bit in both feature masks. * Assumes the caller knows what it's doing! (host must support these) */ case CPUMCPUIDFEATURE_PAE: - { - if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE)) + if (!pVM->cpum.s.HostFeatures.fPae) { - LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n")); return; } - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE; + + pVM->cpum.s.GuestFeatures.fPae = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n")); break; - } /* * Set the LONG MODE bit in the extended feature mask. * Assumes the caller knows what it's doing! (host must support these) */ case CPUMCPUIDFEATURE_LONG_MODE: - { - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( !pLeaf + || !pVM->cpum.s.HostFeatures.fLongMode) { - LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n")); return; } /* Valid for both Intel and AMD. */ - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n")); + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; + pVM->cpum.s.GuestFeatures.fLongMode = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n")); break; - } /* * Set the NX/XD bit in the extended feature mask. * Assumes the caller knows what it's doing! (host must support these) */ case CPUMCPUIDFEATURE_NX: - { - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX)) + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( !pLeaf + || !pVM->cpum.s.HostFeatures.fNoExecute) { - LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n")); return; } /* Valid for both Intel and AMD. */ - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n")); + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX; + pVM->cpum.s.GuestFeatures.fNoExecute = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n")); break; - } + /* * Set the LAHF/SAHF support in 64-bit mode. 
* Assumes the caller knows what it's doing! (host must support this) */ case CPUMCPUIDFEATURE_LAHF: - { - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF)) + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( !pLeaf + || !pVM->cpum.s.HostFeatures.fLahfSahf) { - LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n")); return; } /* Valid for both Intel and AMD. */ - pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n")); + pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; + pVM->cpum.s.GuestFeatures.fLahfSahf = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n")); break; - } + /* + * Set the page attribute table bit. This is alternative page level + * cache control that doesn't much matter when everything is + * virtualized, though it may when passing thru device memory. + */ case CPUMCPUIDFEATURE_PAT: - { - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT; + + pVM->cpum.s.GuestFeatures.fPat = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n")); break; - } /* * Set the RDTSCP support bit. * Assumes the caller knows what it's doing! (host must support this) */ case CPUMCPUIDFEATURE_RDTSCP: - { - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 - || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) - || pVM->cpum.s.u8PortableCpuIdLevel > 0) + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( !pLeaf + || !pVM->cpum.s.HostFeatures.fRdTscP + || pVM->cpum.s.u8PortableCpuIdLevel > 0) { if (!pVM->cpum.s.u8PortableCpuIdLevel) - LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n")); + LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n")); return; } /* Valid for both Intel and AMD. */ - pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n")); + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; + pVM->cpum.s.HostFeatures.fRdTscP = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n")); break; - } /* * Set the Hypervisor Present bit in the standard feature mask. 
*/ case CPUMCPUIDFEATURE_HVP: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP; - LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP; + pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1; + LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n")); break; default: AssertMsgFailed(("enmFeature=%d\n", enmFeature)); break; } + for (VMCPUID i = 0; i < pVM->cCpus; i++) { PVMCPU pVCpu = &pVM->aCpus[i]; @@ -1940,37 +2296,23 @@ VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) { switch (enmFeature) { - case CPUMCPUIDFEATURE_PAE: - { - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE); - break; - } - - case CPUMCPUIDFEATURE_NX: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX); - } - - case CPUMCPUIDFEATURE_RDTSCP: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP); - break; - } - - case CPUMCPUIDFEATURE_LONG_MODE: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); - break; - } - - default: - AssertMsgFailed(("enmFeature=%d\n", enmFeature)); + case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic; + case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic; + case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall; + case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter; + case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae; + case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute; + case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf; + case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode; + case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat; + case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP; + case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent; + + case CPUMCPUIDFEATURE_INVALID: + case CPUMCPUIDFEATURE_32BIT_HACK: break; } + AssertFailed(); return false; } @@ -1983,82 +2325,93 @@ VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) */ VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) { + PCPUMCPUIDLEAF pLeaf; switch (enmFeature) { - /* - * Set the APIC bit in both feature masks. 
- */ case CPUMCPUIDFEATURE_APIC: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC; - Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC; + + pVM->cpum.s.GuestFeatures.fApic = 0; + Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n")); break; - /* - * Clear the x2APIC bit in the standard feature mask. - */ case CPUMCPUIDFEATURE_X2APIC: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC; - LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC; + pVM->cpum.s.GuestFeatures.fX2Apic = 0; + Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n")); break; case CPUMCPUIDFEATURE_PAE: - { - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; - LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; + + pVM->cpum.s.GuestFeatures.fPae = 0; + Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n")); break; - } case CPUMCPUIDFEATURE_PAT: - { - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT; - if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 - && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) - pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; - LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT; + + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if ( pLeaf + && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; + + pVM->cpum.s.GuestFeatures.fPat = 0; + Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n")); break; - } case CPUMCPUIDFEATURE_LONG_MODE: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; + pVM->cpum.s.GuestFeatures.fLongMode = 0; break; - } case 
CPUMCPUIDFEATURE_LAHF: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; + pVM->cpum.s.GuestFeatures.fLahfSahf = 0; break; - } case CPUMCPUIDFEATURE_RDTSCP: - { - if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) - pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP; - LogRel(("CPUMClearGuestCpuIdFeature: Disabled RDTSCP!\n")); + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP; + pVM->cpum.s.GuestFeatures.fRdTscP = 0; + Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n")); break; - } case CPUMCPUIDFEATURE_HVP: - if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) - pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP; + pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); + if (pLeaf) + pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP; + pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0; break; default: AssertMsgFailed(("enmFeature=%d\n", enmFeature)); break; } + for (VMCPUID i = 0; i < pVM->cCpus; i++) { PVMCPU pVCpu = &pVM->aCpus[i]; @@ -2075,7 +2428,7 @@ VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) */ VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM) { - return pVM->cpum.s.enmHostCpuVendor; + return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor; } @@ -2087,49 +2440,49 @@ VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM) */ VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM) { - return pVM->cpum.s.enmGuestCpuVendor; + return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor; } VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0) { pVCpu->cpum.s.Guest.dr[0] = uDr0; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, 0, false); } VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1) { pVCpu->cpum.s.Guest.dr[1] = uDr1; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, 1, false); } VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2) { pVCpu->cpum.s.Guest.dr[2] = uDr2; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, 2, false); } VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3) { pVCpu->cpum.s.Guest.dr[3] = uDr3; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, 3, false); } VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6) { pVCpu->cpum.s.Guest.dr[6] = uDr6; - return CPUMRecalcHyperDRx(pVCpu); + return VINF_SUCCESS; /* No need to recalc. */ } VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7) { pVCpu->cpum.s.Guest.dr[7] = uDr7; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, 7, false); } @@ -2140,45 +2493,81 @@ VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value) if (iReg == 4 || iReg == 5) iReg += 2; pVCpu->cpum.s.Guest.dr[iReg] = Value; - return CPUMRecalcHyperDRx(pVCpu); + return CPUMRecalcHyperDRx(pVCpu, iReg, false); } /** - * Recalculates the hypervisor DRx register values based on - * current guest registers and DBGF breakpoints. + * Recalculates the hypervisor DRx register values based on current guest + * registers and DBGF breakpoints, updating changed registers depending on the + * context. 
+ * + * This is called whenever a guest DRx register is modified (any context) and + * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous). + * + * In raw-mode context this function will reload any (hyper) DRx registers which + * comes out with a different value. It may also have to save the host debug + * registers if that haven't been done already. In this context though, we'll + * be intercepting and emulating all DRx accesses, so the hypervisor DRx values + * are only important when breakpoints are actually enabled. + * + * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be + * reloaded by the HM code if it changes. Further more, we will only use the + * combined register set when the VBox debugger is actually using hardware BPs, + * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't + * concern us here). * - * This is called whenever a guest DRx register is modified and when DBGF - * sets a hardware breakpoint. In guest context this function will reload - * any (hyper) DRx registers which comes out with a different value. + * In ring-3 we won't be loading anything, so well calculate hypervisor values + * all the time. * * @returns VINF_SUCCESS. * @param pVCpu Pointer to the VMCPU. + * @param iGstReg The guest debug register number that was modified. + * UINT8_MAX if not guest register. + * @param fForceHyper Used in HM to force hyper registers because of single + * stepping. */ -VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu) +VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper) { PVM pVM = pVCpu->CTX_SUFF(pVM); /* * Compare the DR7s first. * - * We only care about the enabled flags. The GE and LE flags are always - * set and we don't care if the guest doesn't set them. GD is virtualized - * when we dispatch #DB, we never enable it. + * We only care about the enabled flags. GD is virtualized when we + * dispatch the #DB, we never enable it. The DBGF DR7 value is will + * always have the LE and GE bits set, so no need to check and disable + * stuff if they're cleared like we have to for the guest DR7. */ + RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu); + if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE))) + uGstDr7 = 0; + else if (!(uGstDr7 & X86_DR7_LE)) + uGstDr7 &= ~X86_DR7_LE_ALL; + else if (!(uGstDr7 & X86_DR7_GE)) + uGstDr7 &= ~X86_DR7_GE_ALL; + const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM); -#ifdef CPUM_VIRTUALIZE_DRX - const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu); -#else - const RTGCUINTREG uGstDr7 = 0; + +#ifdef IN_RING0 + if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)) + fForceHyper = true; #endif - if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK) + if (( HMIsEnabled(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK) { + Assert(!CPUMIsGuestDebugStateActive(pVCpu)); +#ifdef IN_RC + bool const fHmEnabled = false; +#elif defined(IN_RING3) + bool const fHmEnabled = HMIsEnabled(pVM); +#endif + /* - * Ok, something is enabled. Recalc each of the breakpoints. - * Straight forward code, not optimized/minimized in any way. + * Ok, something is enabled. Recalc each of the breakpoints, taking + * the VM debugger ones of the guest ones. In raw-mode context we will + * not allow breakpoints with values inside the hypervisor area. 
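
A standalone sketch of the guest-DR7 sanitising step above and of the combined "anything enabled?" test, illustration only: the DR7_* values below are simplified stand-ins for the real X86_DR7_* macros, not taken from the VirtualBox headers.

#include <stdint.h>
#include <stdbool.h>

/* Assumed architectural DR7 layout: L0/G0..L3/G3 in bits 0..7, LE bit 8, GE bit 9. */
#define DR7_L_G_ALL   UINT64_C(0x000000ff)   /* All local/global enable bits.  */
#define DR7_LE_ALL    UINT64_C(0x00000055)   /* L0..L3 (local enables).        */
#define DR7_GE_ALL    UINT64_C(0x000000aa)   /* G0..G3 (global enables).       */
#define DR7_LE        UINT64_C(0x00000100)
#define DR7_GE        UINT64_C(0x00000200)

/* Mirror of the logic above: drop guest enables that LE/GE do not qualify. */
static uint64_t SanitizeGuestDr7(uint64_t uGstDr7)
{
    if (!(uGstDr7 & (DR7_LE | DR7_GE)))
        return 0;
    if (!(uGstDr7 & DR7_LE))
        uGstDr7 &= ~DR7_LE_ALL;
    else if (!(uGstDr7 & DR7_GE))
        uGstDr7 &= ~DR7_GE_ALL;
    return uGstDr7;
}

/* Is any breakpoint enabled in either the debugger DR7 or the sanitized guest DR7? */
static bool AnyBreakpointEnabled(uint64_t uDbgfDr7, uint64_t uGstDr7)
{
    return ((uDbgfDr7 | SanitizeGuestDr7(uGstDr7)) & DR7_L_G_ALL) != 0;
}
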
*/ - RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK; + RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK; /* bp 0 */ RTGCUINTREG uNewDr0; @@ -2189,11 +2578,16 @@ VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu) } else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0)) { - uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK); uNewDr0 = CPUMGetGuestDR0(pVCpu); +#ifndef IN_RING0 + if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr0)) + uNewDr0 = 0; + else +#endif + uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK); } else - uNewDr0 = pVCpu->cpum.s.Hyper.dr[0]; + uNewDr0 = 0; /* bp 1 */ RTGCUINTREG uNewDr1; @@ -2204,11 +2598,16 @@ VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu) } else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1)) { - uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK); uNewDr1 = CPUMGetGuestDR1(pVCpu); +#ifndef IN_RING0 + if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr1)) + uNewDr1 = 0; + else +#endif + uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK); } else - uNewDr1 = pVCpu->cpum.s.Hyper.dr[1]; + uNewDr1 = 0; /* bp 2 */ RTGCUINTREG uNewDr2; @@ -2219,11 +2618,16 @@ VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu) } else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2)) { - uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK); uNewDr2 = CPUMGetGuestDR2(pVCpu); +#ifndef IN_RING0 + if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr2)) + uNewDr2 = 0; + else +#endif + uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK); } else - uNewDr2 = pVCpu->cpum.s.Hyper.dr[2]; + uNewDr2 = 0; /* bp 3 */ RTGCUINTREG uNewDr3; @@ -2234,47 +2638,122 @@ VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu) } else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3)) { - uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK); uNewDr3 = CPUMGetGuestDR3(pVCpu); +#ifndef IN_RING0 + if (fHmEnabled && MMHyperIsInsideArea(pVM, uNewDr3)) + uNewDr3 = 0; + else +#endif + uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK); } else - uNewDr3 = pVCpu->cpum.s.Hyper.dr[3]; + uNewDr3 = 0; /* * Apply the updates. */ #ifdef IN_RC - if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)) + /* Make sure to save host registers first. */ + if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)) { - /** @todo save host DBx registers. */ + if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST)) + { + pVCpu->cpum.s.Host.dr6 = ASMGetDR6(); + pVCpu->cpum.s.Host.dr7 = ASMGetDR7(); + } + pVCpu->cpum.s.Host.dr0 = ASMGetDR0(); + pVCpu->cpum.s.Host.dr1 = ASMGetDR1(); + pVCpu->cpum.s.Host.dr2 = ASMGetDR2(); + pVCpu->cpum.s.Host.dr3 = ASMGetDR3(); + pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER; + + /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. 
*/ + pVCpu->cpum.s.Hyper.dr[0] = uNewDr0; + ASMSetDR0(uNewDr0); + pVCpu->cpum.s.Hyper.dr[1] = uNewDr1; + ASMSetDR1(uNewDr1); + pVCpu->cpum.s.Hyper.dr[2] = uNewDr2; + ASMSetDR2(uNewDr2); + pVCpu->cpum.s.Hyper.dr[3] = uNewDr3; + ASMSetDR3(uNewDr3); + ASMSetDR6(X86_DR6_INIT_VAL); + pVCpu->cpum.s.Hyper.dr[7] = uNewDr7; + ASMSetDR7(uNewDr7); } + else #endif - pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS; - if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3]) - CPUMSetHyperDR3(pVCpu, uNewDr3); - if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2]) - CPUMSetHyperDR2(pVCpu, uNewDr2); - if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1]) - CPUMSetHyperDR1(pVCpu, uNewDr1); - if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0]) - CPUMSetHyperDR0(pVCpu, uNewDr0); - if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7]) - CPUMSetHyperDR7(pVCpu, uNewDr7); + { + pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER; + if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3]) + CPUMSetHyperDR3(pVCpu, uNewDr3); + if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2]) + CPUMSetHyperDR2(pVCpu, uNewDr2); + if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1]) + CPUMSetHyperDR1(pVCpu, uNewDr1); + if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0]) + CPUMSetHyperDR0(pVCpu, uNewDr0); + if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7]) + CPUMSetHyperDR7(pVCpu, uNewDr7); + } + } +#ifdef IN_RING0 + else if (CPUMIsGuestDebugStateActive(pVCpu)) + { + /* + * Reload the register that was modified. Normally this won't happen + * as we won't intercept DRx writes when not having the hyper debug + * state loaded, but in case we do for some reason we'll simply deal + * with it. + */ + switch (iGstReg) + { + case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break; + case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break; + case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break; + case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break; + default: + AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3); + } } +#endif else { -#ifdef IN_RC - if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) + /* + * No active debug state any more. In raw-mode this means we have to + * make sure DR7 has everything disabled now, if we armed it already. + * In ring-0 we might end up here when just single stepping. + */ +#if defined(IN_RC) || defined(IN_RING0) + if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) { - /** @todo restore host DBx registers. */ +# ifdef IN_RC + ASMSetDR7(X86_DR7_INIT_VAL); +# endif + if (pVCpu->cpum.s.Hyper.dr[0]) + ASMSetDR0(0); + if (pVCpu->cpum.s.Hyper.dr[1]) + ASMSetDR1(0); + if (pVCpu->cpum.s.Hyper.dr[2]) + ASMSetDR2(0); + if (pVCpu->cpum.s.Hyper.dr[3]) + ASMSetDR3(0); + pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER; } #endif - pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS; + pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER; + + /* Clear all the registers. 
*/ + pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK; + pVCpu->cpum.s.Hyper.dr[3] = 0; + pVCpu->cpum.s.Hyper.dr[2] = 0; + pVCpu->cpum.s.Hyper.dr[1] = 0; + pVCpu->cpum.s.Hyper.dr[0] = 0; + } Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n", pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1], - pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6], - pVCpu->cpum.s.Hyper.dr[7])); + pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6], + pVCpu->cpum.s.Hyper.dr[7])); return VINF_SUCCESS; } @@ -2398,8 +2877,10 @@ VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu) */ VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu) { + /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather + than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */ return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE) - && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG) + && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG) && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA); } @@ -2432,6 +2913,7 @@ VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx) } #ifdef VBOX_WITH_RAW_MODE_NOT_R0 + /** * * @returns @c true if we've entered raw-mode and selectors with RPL=1 are @@ -2442,10 +2924,195 @@ VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu) { return pVCpu->cpum.s.fRawEntered; } -#endif + +/** + * Transforms the guest CPU state to raw-ring mode. + * + * This function will change the any of the cs and ss register with DPL=0 to DPL=1. + * + * @returns VBox status. (recompiler failure) + * @param pVCpu Pointer to the VMCPU. + * @param pCtxCore The context core (for trap usage). + * @see @ref pg_raw + */ +VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore) +{ + PVM pVM = pVCpu->CTX_SUFF(pVM); + + Assert(!pVCpu->cpum.s.fRawEntered); + Assert(!pVCpu->cpum.s.fRemEntered); + if (!pCtxCore) + pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest); + + /* + * Are we in Ring-0? + */ + if ( pCtxCore->ss.Sel + && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0 + && !pCtxCore->eflags.Bits.u1VM) + { + /* + * Enter execution mode. + */ + PATMRawEnter(pVM, pCtxCore); + + /* + * Set CPL to Ring-1. + */ + pCtxCore->ss.Sel |= 1; + if ( pCtxCore->cs.Sel + && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0) + pCtxCore->cs.Sel |= 1; + } + else + { +# ifdef VBOX_WITH_RAW_RING1 + if ( EMIsRawRing1Enabled(pVM) + && !pCtxCore->eflags.Bits.u1VM + && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1) + { + /* Set CPL to Ring-2. */ + pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2; + if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1) + pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2; + } +# else + AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM, + ("ring-1 code not supported\n")); +# endif + /* + * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well. + */ + PATMRawEnter(pVM, pCtxCore); + } + + /* + * Assert sanity. 
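
The raw-mode entry code above compresses the guest's privilege rings by raising the RPL of CS and SS: guest ring-0 code runs at RPL 1, and with VBOX_WITH_RAW_RING1 guest ring-1 code runs at RPL 2. A minimal sketch of that selector adjustment (illustration only; only the 2-bit RPL field is architectural fact):

#include <stdint.h>

#define SEL_RPL_MASK  UINT16_C(0x0003)   /* Low two bits of a selector. */

/* Push a CS/SS selector one ring outwards for raw-mode execution:
   RPL 0 -> 1, and (when raw ring-1 support is built in) RPL 1 -> 2. */
static uint16_t RawModeCompressSelector(uint16_t Sel, int fRawRing1Enabled)
{
    uint16_t const uRpl = Sel & SEL_RPL_MASK;
    if (uRpl == 0)
        return (uint16_t)(Sel | 1);
    if (uRpl == 1 && fRawRing1Enabled)
        return (uint16_t)((Sel & ~SEL_RPL_MASK) | 2);
    return Sel;                          /* Ring-2/3 code is left untouched. */
}
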
+ */ + AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n")); + AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0, + ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL)); + Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP)); + + pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */ + + pVCpu->cpum.s.fRawEntered = true; + return VINF_SUCCESS; +} /** + * Transforms the guest CPU state from raw-ring mode to correct values. + * + * This function will change any selector registers with DPL=1 to DPL=0. + * + * @returns Adjusted rc. + * @param pVCpu Pointer to the VMCPU. + * @param rc Raw mode return code + * @param pCtxCore The context core (for trap usage). + * @see @ref pg_raw + */ +VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc) +{ + PVM pVM = pVCpu->CTX_SUFF(pVM); + + /* + * Don't leave if we've already left (in RC). + */ + Assert(!pVCpu->cpum.s.fRemEntered); + if (!pVCpu->cpum.s.fRawEntered) + return rc; + pVCpu->cpum.s.fRawEntered = false; + + PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; + if (!pCtxCore) + pCtxCore = CPUMCTX2CORE(pCtx); + Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL)); + AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL), + ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL)); + + /* + * Are we executing in raw ring-1? + */ + if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1 + && !pCtxCore->eflags.Bits.u1VM) + { + /* + * Leave execution mode. + */ + PATMRawLeave(pVM, pCtxCore, rc); + /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */ + /** @todo See what happens if we remove this. */ + if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1) + pCtxCore->ds.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1) + pCtxCore->es.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1) + pCtxCore->fs.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1) + pCtxCore->gs.Sel &= ~X86_SEL_RPL; + + /* + * Ring-1 selector => Ring-0. + */ + pCtxCore->ss.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1) + pCtxCore->cs.Sel &= ~X86_SEL_RPL; + } + else + { + /* + * PATM is taking care of the IOPL and IF flags for us. + */ + PATMRawLeave(pVM, pCtxCore, rc); + if (!pCtxCore->eflags.Bits.u1VM) + { +# ifdef VBOX_WITH_RAW_RING1 + if ( EMIsRawRing1Enabled(pVM) + && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2) + { + /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */ + /** @todo See what happens if we remove this. */ + if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2) + pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1; + if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2) + pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1; + if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2) + pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1; + if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2) + pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1; + + /* + * Ring-2 selector => Ring-1. + */ + pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1; + if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2) + pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1; + } + else + { +# endif + /** @todo See what happens if we remove this. 
*/ + if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1) + pCtxCore->ds.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1) + pCtxCore->es.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1) + pCtxCore->fs.Sel &= ~X86_SEL_RPL; + if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1) + pCtxCore->gs.Sel &= ~X86_SEL_RPL; +# ifdef VBOX_WITH_RAW_RING1 + } +# endif + } + } + + return rc; +} + +#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ + +/** * Updates the EFLAGS while we're in raw-mode. * * @param pVCpu Pointer to the VMCPU. @@ -2509,7 +3176,7 @@ VMMDECL(bool) CPUMSupportsFXSR(PVM pVM) */ VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM) { - return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0; + return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER); } @@ -2521,10 +3188,10 @@ VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM) */ VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM) { - return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0; + return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL); } -#ifndef IN_RING3 +#ifdef IN_RC /** * Lazily sync in the FPU/XMM state. @@ -2537,7 +3204,7 @@ VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu) return cpumHandleLazyFPUAsm(&pVCpu->cpum.s); } -#endif /* !IN_RING3 */ +#endif /* !IN_RC */ /** * Checks if we activated the FPU/XMM state of the guest OS. @@ -2547,16 +3214,20 @@ VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu) */ VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu) { - return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0; + return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU); } /** * Deactivate the FPU/XMM state of the guest OS. * @param pVCpu Pointer to the VMCPU. + * + * @todo r=bird: Why is this needed? Looks like a workaround for mishandled + * FPU state management. */ VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu) { + Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)); pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU; } @@ -2565,13 +3236,27 @@ VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu) * Checks if the guest debug state is active. * * @returns boolean - * @param pVM Pointer to the VM. + * @param pVM Pointer to the VMCPU. */ VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu) { - return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0; + return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST); } + +/** + * Checks if the guest debug state is to be made active during the world-switch + * (currently only used for the 32->64 switcher case). + * + * @returns boolean + * @param pVM Pointer to the VMCPU. + */ +VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu) +{ + return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST); +} + + /** * Checks if the hyper debug state is active. * @@ -2580,31 +3265,33 @@ VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu) */ VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu) { - return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0; + return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER); } /** - * Mark the guest's debug state as inactive. + * Checks if the hyper debug state is to be made active during the world-switch + * (currently only used for the 32->64 switcher case). * * @returns boolean - * @param pVM Pointer to the VM. + * @param pVM Pointer to the VMCPU. 
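
The debug-register predicates in this hunk separate three kinds of state kept in fUseFlags: what is currently loaded on the CPU (the USED guest/hyper/host flags) and what is merely scheduled to be loaded on the next world switch (the SYNC flags used by the 32->64 switcher). The bit values in this sketch are invented for illustration; only the USED/SYNC distinction comes from the patch.

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical bit assignments -- the real CPUM_* values live in CPUMInternal.h. */
#define USED_DEBUG_REGS_HYPER   UINT32_C(0x0001)   /* Hyper DRx loaded on the CPU.    */
#define USED_DEBUG_REGS_GUEST   UINT32_C(0x0002)   /* Guest DRx loaded on the CPU.    */
#define USED_DEBUG_REGS_HOST    UINT32_C(0x0004)   /* Host DRx saved and replaced.    */
#define SYNC_DEBUG_REGS_GUEST   UINT32_C(0x0008)   /* Load guest DRx at world switch. */
#define SYNC_DEBUG_REGS_HYPER   UINT32_C(0x0010)   /* Load hyper DRx at world switch. */

static bool IsGuestDebugStateActive(uint32_t fUseFlags)        { return (fUseFlags & USED_DEBUG_REGS_GUEST) != 0; }
static bool IsGuestDebugStateActivePending(uint32_t fUseFlags) { return (fUseFlags & SYNC_DEBUG_REGS_GUEST) != 0; }
static bool IsHyperDebugStateActive(uint32_t fUseFlags)        { return (fUseFlags & USED_DEBUG_REGS_HYPER) != 0; }
static bool IsHyperDebugStateActivePending(uint32_t fUseFlags) { return (fUseFlags & SYNC_DEBUG_REGS_HYPER) != 0; }
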
*/ -VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu) +VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu) { - pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS; + return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER); } /** - * Mark the hypervisor's debug state as inactive. + * Mark the guest's debug state as inactive. * * @returns boolean * @param pVM Pointer to the VM. + * @todo This API doesn't make sense any more. */ -VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu) +VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu) { - pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER; + Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST))); } @@ -2631,6 +3318,19 @@ VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu) * Note! The SS RPL is always equal to the CPL, while the CS RPL * isn't necessarily equal if the segment is conforming. * See section 4.11.1 in the AMD manual. + * + * Update: Where the heck does it say CS.RPL can differ from CPL other than + * right after real->prot mode switch and when in V8086 mode? That + * section says the RPL specified in a direct transfere (call, jmp, + * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL + * it would be impossible for an exception handle or the iret + * instruction to figure out whether SS:ESP are part of the frame + * or not. VBox or qemu bug must've lead to this misconception. + * + * Update2: On an AMD bulldozer system here, I've no trouble loading a null + * selector into SS with an RPL other than the CPL when CPL != 3 and + * we're in 64-bit mode. The intel dev box doesn't allow this, on + * RPL = CPL. Weird. */ uint32_t uCpl; if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE) @@ -2643,8 +3343,20 @@ VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu) { uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL); #ifdef VBOX_WITH_RAW_MODE_NOT_R0 +# ifdef VBOX_WITH_RAW_RING1 + if (pVCpu->cpum.s.fRawEntered) + { + if ( uCpl == 2 + && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))) + uCpl = 1; + else if (uCpl == 1) + uCpl = 0; + } + Assert(uCpl != 2); /* ring 2 support not allowed anymore. */ +# else if (uCpl == 1) uCpl = 0; +# endif #endif } } diff --git a/src/VBox/VMM/VMMAll/CPUMStack.cpp b/src/VBox/VMM/VMMAll/CPUMStack.cpp index 31506e4b..2d9ba007 100644 --- a/src/VBox/VMM/VMMAll/CPUMStack.cpp +++ b/src/VBox/VMM/VMMAll/CPUMStack.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/CSAMAll.cpp b/src/VBox/VMM/VMMAll/CSAMAll.cpp index db9143d9..d2281a1c 100644 --- a/src/VBox/VMM/VMMAll/CSAMAll.cpp +++ b/src/VBox/VMM/VMMAll/CSAMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -26,6 +26,7 @@ #include <VBox/vmm/csam.h> #include <VBox/vmm/pgm.h> #include <VBox/vmm/em.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/mm.h> #include <VBox/sup.h> #include <VBox/vmm/mm.h> @@ -37,11 +38,11 @@ #include <VBox/dbg.h> #include <VBox/err.h> #include <VBox/log.h> -#include <iprt/assert.h> #include <VBox/dis.h> #include <VBox/disopcode.h> -#include <iprt/string.h> +#include <iprt/assert.h> #include <iprt/asm.h> +#include <iprt/string.h> /** * Check if this page needs to be analysed by CSAM @@ -50,14 +51,15 @@ * @param pVM Pointer to the VM. * @param pvFault Fault address */ -VMMDECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault) +VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault) { - if(!CSAMIsEnabled(pVM)) + Assert(!HMIsEnabled(pVM)); + if (!CSAMIsEnabled(pVM)) return VINF_SUCCESS; LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault))); - if(CSAMIsPageScanned(pVM, pvFault)) + if (CSAMIsPageScanned(pVM, pvFault)) { // Already checked! STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1); @@ -77,10 +79,11 @@ VMMDECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault) * @param pVM Pointer to the VM. * @param pPage GC page address */ -VMMDECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage) +VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage) { int pgdir, bit; uintptr_t page; + Assert(!HMIsEnabled(pVM)); page = (uintptr_t)pPage; pgdir = page >> X86_PAGE_4M_SHIFT; @@ -105,7 +108,7 @@ VMMDECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage) * @param fScanned Mark as scanned or not scanned * */ -VMMDECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned) +VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned) { int pgdir, bit; uintptr_t page; @@ -115,8 +118,9 @@ VMMDECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned) Log(("CSAMMarkPage %RRv\n", pPage)); #endif - if(!CSAMIsEnabled(pVM)) + if (!CSAMIsEnabled(pVM)) return VINF_SUCCESS; + Assert(!HMIsEnabled(pVM)); page = (uintptr_t)pPage; pgdir = page >> X86_PAGE_4M_SHIFT; @@ -174,10 +178,11 @@ VMMDECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned) * @param pVM Pointer to the VM. * @param GCPtr GC pointer of page */ -VMMDECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr) +VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr) { - if(!CSAMIsEnabled(pVM)) + if (!CSAMIsEnabled(pVM)) return false; + Assert(!HMIsEnabled(pVM)); if(CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr)) { @@ -197,8 +202,9 @@ VMMDECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr) * @param pVM Pointer to the VM. * @param GCPtr GC pointer of page */ -VMMDECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr) +VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr) { + Assert(!HMIsEnabled(pVM)); if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage)) { pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr; @@ -214,8 +220,9 @@ VMMDECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr) * @returns VBox status code. * @param pVM Pointer to the VM. */ -VMMDECL(int) CSAMEnableScanning(PVM pVM) +VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM) { + AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE); pVM->fCSAMEnabled = true; return VINF_SUCCESS; } @@ -226,7 +233,7 @@ VMMDECL(int) CSAMEnableScanning(PVM pVM) * @returns VBox status code. * @param pVM Pointer to the VM. 
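
CSAMIsPageScanned and CSAMMarkPage above index a two-level "page scanned" bitmap by splitting the guest address into a 4MB directory slot and a per-4KB-page bit inside it. A standalone sketch of that split; the shift values are assumed from the X86_PAGE_4M_SHIFT / X86_PAGE_SHIFT usage rather than copied from the sources.

#include <stdint.h>

#define PAGE_SHIFT      12   /* 4KB pages.            */
#define PAGE_4M_SHIFT   22   /* 4MB directory chunks. */

/* Map a 32-bit guest address to (directory index, bit index) in the bitmap:
   one array entry per 4MB region, one bit per 4KB page inside it. */
static void ScannedBitmapIndex(uint32_t uGCPtr, unsigned *piDir, unsigned *piBit)
{
    *piDir = uGCPtr >> PAGE_4M_SHIFT;                                             /* 0..1023 */
    *piBit = (uGCPtr >> PAGE_SHIFT) & ((1u << (PAGE_4M_SHIFT - PAGE_SHIFT)) - 1); /* 0..1023 */
}
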
*/ -VMMDECL(int) CSAMDisableScanning(PVM pVM) +VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM) { pVM->fCSAMEnabled = false; return VINF_SUCCESS; @@ -244,8 +251,10 @@ VMMDECL(int) CSAMDisableScanning(PVM pVM) * @param pVM Pointer to the VM. * @param GCPtr GC pointer of page table entry */ -VMMDECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr) +VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr) { + Assert(!HMIsEnabled(pVM)); + for (uint32_t i=0;i<pVM->csam.s.cDangerousInstr;i++) { if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr) diff --git a/src/VBox/VMM/VMMAll/DBGFAll.cpp b/src/VBox/VMM/VMMAll/DBGFAll.cpp index 716b6189..e2881a0b 100644 --- a/src/VBox/VMM/VMMAll/DBGFAll.cpp +++ b/src/VBox/VMM/VMMAll/DBGFAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -23,6 +23,7 @@ #include <VBox/vmm/dbgf.h> #include "DBGFInternal.h" #include <VBox/vmm/vm.h> +#include <VBox/err.h> #include <iprt/assert.h> @@ -32,9 +33,9 @@ * @returns DR7 from the DBGF point of view. * @param pVM Pointer to the VM. */ -VMMDECL(RTGCUINTREG) DBGFBpGetDR7(PVM pVM) +VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR7(PVM pVM) { - RTGCUINTREG uDr7 = X86_DR7_GD | X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK; + RTGCUINTREG uDr7 = X86_DR7_GD | X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK; PDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[0]; unsigned cLeft = RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); while (cLeft-- > 0) @@ -63,7 +64,7 @@ VMMDECL(RTGCUINTREG) DBGFBpGetDR7(PVM pVM) * @returns DR0 from the DBGF point of view. * @param pVM Pointer to the VM. */ -VMMDECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM) +VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM) { PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[0]; Assert(pBp->u.Reg.iReg == 0); @@ -77,7 +78,7 @@ VMMDECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM) * @returns DR1 from the DBGF point of view. * @param pVM Pointer to the VM. */ -VMMDECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM) +VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM) { PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[1]; Assert(pBp->u.Reg.iReg == 1); @@ -91,7 +92,7 @@ VMMDECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM) * @returns DR2 from the DBGF point of view. * @param pVM Pointer to the VM. */ -VMMDECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM) +VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM) { PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[2]; Assert(pBp->u.Reg.iReg == 2); @@ -105,7 +106,7 @@ VMMDECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM) * @returns DR3 from the DBGF point of view. * @param pVM Pointer to the VM. */ -VMMDECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM) +VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM) { PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[3]; Assert(pBp->u.Reg.iReg == 3); @@ -114,13 +115,152 @@ VMMDECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM) /** + * Checks if any of the hardware breakpoints are armed. + * + * @returns true if armed, false if not. + * @param pVM The cross context VM structure. 
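
DBGFBpGetDR7 above assembles a DR7 image from the debugger's four hardware breakpoint slots. The field packing in the sketch below is the architectural one (enable bits in the low byte, a 2-bit R/W type and 2-bit length per slot from bit 16 up); the DbgfHwBp structure and the choice of local enables are invented for the example and may not match the real DBGF layout.

#include <stdint.h>
#include <stdbool.h>

typedef struct DbgfHwBp          /* Invented for the example. */
{
    bool    fEnabled;
    uint8_t fType;               /* DR7 R/W encoding: 0=exec, 1=write, 2=I/O, 3=read/write. */
    uint8_t cb;                  /* Breakpoint length: 1, 2, 4 or 8 bytes.                  */
} DbgfHwBp;

static uint64_t ComposeDr7(const DbgfHwBp aBp[4])
{
    uint64_t uDr7 = UINT64_C(0x2700);                /* GD | GE | LE | must-be-one bit 10. */
    for (unsigned i = 0; i < 4; i++)
        if (aBp[i].fEnabled)
        {
            static const uint8_t s_aLenEnc[9] = { 0, 0, 1, 0, 3, 0, 0, 0, 2 }; /* cb -> LEN. */
            uDr7 |= UINT64_C(1) << (i * 2);                               /* L<i> enable.   */
            uDr7 |= (uint64_t)(aBp[i].fType & 3)   << (16 + i * 4);       /* R/W<i>.        */
            uDr7 |= (uint64_t)s_aLenEnc[aBp[i].cb] << (18 + i * 4);       /* LEN<i>.        */
        }
    return uDr7;
}
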
+ */ +VMM_INT_DECL(bool) DBGFBpIsHwArmed(PVM pVM) +{ + Assert(RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints) == 4); + return (pVM->dbgf.s.aHwBreakpoints[0].fEnabled && pVM->dbgf.s.aHwBreakpoints[0].enmType == DBGFBPTYPE_REG) + || (pVM->dbgf.s.aHwBreakpoints[1].fEnabled && pVM->dbgf.s.aHwBreakpoints[1].enmType == DBGFBPTYPE_REG) + || (pVM->dbgf.s.aHwBreakpoints[2].fEnabled && pVM->dbgf.s.aHwBreakpoints[2].enmType == DBGFBPTYPE_REG) + || (pVM->dbgf.s.aHwBreakpoints[3].fEnabled && pVM->dbgf.s.aHwBreakpoints[3].enmType == DBGFBPTYPE_REG); +} + + +/** + * Checks if any of the hardware I/O breakpoints are armed. + * + * @returns true if armed, false if not. + * @param pVM The cross context VM structure. + */ +VMM_INT_DECL(bool) DBGFBpIsHwIoArmed(PVM pVM) +{ + Assert(RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints) == 4); + /** @todo cache this! */ + return ( pVM->dbgf.s.aHwBreakpoints[0].u.Reg.fType == X86_DR7_RW_IO + && pVM->dbgf.s.aHwBreakpoints[0].fEnabled + && pVM->dbgf.s.aHwBreakpoints[0].enmType == DBGFBPTYPE_REG + ) + || ( pVM->dbgf.s.aHwBreakpoints[1].u.Reg.fType == X86_DR7_RW_IO + && pVM->dbgf.s.aHwBreakpoints[1].fEnabled + && pVM->dbgf.s.aHwBreakpoints[1].enmType == DBGFBPTYPE_REG + ) + || ( pVM->dbgf.s.aHwBreakpoints[2].u.Reg.fType == X86_DR7_RW_IO + && pVM->dbgf.s.aHwBreakpoints[2].fEnabled + && pVM->dbgf.s.aHwBreakpoints[2].enmType == DBGFBPTYPE_REG + ) + || ( pVM->dbgf.s.aHwBreakpoints[3].u.Reg.fType == X86_DR7_RW_IO + && pVM->dbgf.s.aHwBreakpoints[3].fEnabled + && pVM->dbgf.s.aHwBreakpoints[3].enmType == DBGFBPTYPE_REG + ); +} + + +/** + * Checks I/O access for guest or hypervisor breakpoints. + * + * @returns Strict VBox status code + * @retval VINF_SUCCESS no breakpoint. + * @retval VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered. + * @retval VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have + * been updated appropriately. + * + * @param pVM The cross context VM structure. + * @param pVCpu The cross context CPU structure for the calling EMT. + * @param pCtx The CPU context for the calling EMT. + * @param uIoPort The I/O port being accessed. + * @param cbValue The size/width of the access, in bytes. + */ +VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue) +{ + uint32_t const uIoPortFirst = uIoPort; + uint32_t const uIoPortLast = uIoPortFirst + cbValue - 1; + + + /* + * Check hyper breakpoints first as the VMM debugger has priority over + * the guest. + */ + for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++) + { + if ( pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO + && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled + && pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG ) + { + uint8_t cbReg = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb; Assert(RT_IS_POWER_OF_TWO(cbReg)); + uint64_t uDrXFirst = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1); + uint64_t uDrXLast = uDrXFirst + cbReg - 1; + if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst) + { + /* (See also DBGFRZTrap01Handler.) */ + pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp; + pVCpu->dbgf.s.fSingleSteppingRaw = false; + + LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n", + pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort)); + return VINF_EM_DBG_BREAKPOINT; + } + } + } + + /* + * Check the guest. 
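
The hypervisor-breakpoint loop in DBGFBpCheckIo above reduces the hit test to an interval overlap: the breakpoint covers cb bytes aligned down to a power-of-two boundary, the access covers cbValue ports. The same test in isolation (names invented for the example):

#include <stdint.h>
#include <stdbool.h>

/* Does an access of cbValue ports at uIoPort hit a breakpoint of cbBp bytes
   (cbBp is a power of two) armed at address uBpAddr? */
static bool IoBreakpointHit(uint16_t uIoPort, uint8_t cbValue, uint64_t uBpAddr, uint8_t cbBp)
{
    uint32_t const uIoFirst = uIoPort;
    uint32_t const uIoLast  = uIoFirst + cbValue - 1;
    uint64_t const uBpFirst = uBpAddr & ~(uint64_t)(cbBp - 1);   /* Align down to cbBp. */
    uint64_t const uBpLast  = uBpFirst + cbBp - 1;
    return uBpFirst <= uIoLast && uBpLast >= uIoFirst;
}
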
+ */ + uint32_t const uDr7 = pCtx->dr[7]; + if ( (uDr7 & X86_DR7_ENABLED_MASK) + && X86_DR7_ANY_RW_IO(uDr7) + && (pCtx->cr4 & X86_CR4_DE) ) + { + for (unsigned iBp = 0; iBp < 4; iBp++) + { + if ( (uDr7 & X86_DR7_L_G(iBp)) + && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO) + { + /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */ + static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 }; + uint8_t cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)]; + uint64_t uDrXFirst = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign; + uint64_t uDrXLast = uDrXFirst + cbInvAlign; + + if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst) + { + /* + * Update DR6 and DR7. + * + * See "AMD64 Architecture Programmer's Manual Volume 2", + * chapter 13.1.1.3 for details on DR6 bits. The basics is + * that the B0..B3 bits are always cleared while the others + * must be cleared by software. + * + * The following sub chapters says the GD bit is always + * cleared when generating a #DB so the handler can safely + * access the debug registers. + */ + pCtx->dr[6] &= ~X86_DR6_B_MASK; + pCtx->dr[6] |= X86_DR6_B(iBp); + pCtx->dr[7] &= ~X86_DR7_GD; + LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n", + pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort)); + return VINF_EM_RAW_GUEST_TRAP; + } + } + } + } + return VINF_SUCCESS; +} + + +/** * Returns the single stepping state for a virtual CPU. * * @returns stepping (true) or not (false). * * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(bool) DBGFIsStepping(PVMCPU pVCpu) +VMM_INT_DECL(bool) DBGFIsStepping(PVMCPU pVCpu) { return pVCpu->dbgf.s.fSingleSteppingRaw; } diff --git a/src/VBox/VMM/VMMAll/EMAll.cpp b/src/VBox/VMM/VMMAll/EMAll.cpp index 565aacab..4c4a595b 100644 --- a/src/VBox/VMM/VMMAll/EMAll.cpp +++ b/src/VBox/VMM/VMMAll/EMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -33,7 +33,7 @@ #include "EMInternal.h" #include <VBox/vmm/vm.h> #include <VBox/vmm/vmm.h> -#include <VBox/vmm/hwaccm.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/tm.h> #include <VBox/vmm/pdmapi.h> #include <VBox/param.h> @@ -55,6 +55,11 @@ //# define VBOX_COMPARE_IEM_LAST #endif +#ifdef VBOX_WITH_RAW_RING1 +# define EM_EMULATE_SMSW +#endif + + /******************************************************************************* * Defined Constants And Macros * *******************************************************************************/ @@ -117,7 +122,7 @@ static size_t g_cbIemWrote; * @returns Current status. * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(EMSTATE) EMGetState(PVMCPU pVCpu) +VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu) { return pVCpu->em.s.enmState; } @@ -127,7 +132,7 @@ VMMDECL(EMSTATE) EMGetState(PVMCPU pVCpu) * * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState) +VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState) { /* Only allowed combination: */ Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED); @@ -175,13 +180,15 @@ VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu) * @param rax The content of RAX. * @param rcx The content of RCX. * @param rdx The content of RDX. + * @param GCPhys The physical address corresponding to rax. 
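
The guest-side check above decodes the 2-bit DR7 LEN field through a small table (LEN 00 = 1 byte, 01 = 2, 10 = 8, 11 = 4 bytes, hence the {0,1,7,3} alignment masks) and, on a hit, rewrites DR6 so only the matching B bit is reported and clears DR7.GD. A sketch of that DR6/DR7 bookkeeping; the bit values are the architectural ones.

#include <stdint.h>

#define DR6_B_MASK  UINT64_C(0x000000000000000f)   /* B0..B3.         */
#define DR7_GD      UINT64_C(0x0000000000002000)   /* General detect. */

/* Record that hardware breakpoint iBp (0..3) fired: report only its B bit and
   drop GD so the guest #DB handler may access the debug registers safely. */
static void ReportGuestIoBreakpoint(uint64_t *puDr6, uint64_t *puDr7, unsigned iBp)
{
    *puDr6 &= ~DR6_B_MASK;
    *puDr6 |= UINT64_C(1) << iBp;
    *puDr7 &= ~DR7_GD;
}
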
*/ -VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx) +VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys) { pVCpu->em.s.MWait.uMonitorRAX = rax; pVCpu->em.s.MWait.uMonitorRCX = rcx; pVCpu->em.s.MWait.uMonitorRDX = rdx; pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE; + /** @todo Make use of GCPhys. */ /** @todo Complete MONITOR implementation. */ return VINF_SUCCESS; } @@ -211,23 +218,25 @@ VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx) /** - * Determine if we should continue after encountering a hlt or mwait - * instruction. + * Determine if we should continue after encountering a mwait instruction. * * Clears MWAIT flags if returning @c true. * - * @returns boolean + * @returns true if we should continue, false if we should halt. * @param pVCpu Pointer to the VMCPU. * @param pCtx Current CPU context. */ -VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx) +VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx) { if ( pCtx->eflags.Bits.u1IF || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) ) { - pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0); - return !!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)); + if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))) + { + pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0); + return true; + } } return false; @@ -235,6 +244,21 @@ VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx) /** + * Determine if we should continue after encountering a hlt instruction. + * + * @returns true if we should continue, false if we should halt. + * @param pVCpu Pointer to the VMCPU. + * @param pCtx Current CPU context. + */ +VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx) +{ + if (pCtx->eflags.Bits.u1IF) + return !!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)); + return false; +} + + +/** * Locks REM execution to a single VCPU. * * @param pVM Pointer to the VM. @@ -246,7 +270,7 @@ VMMDECL(void) EMRemLock(PVM pVM) return; /* early init */ Assert(!PGMIsLockOwner(pVM)); - Assert(!IOMIsLockOwner(pVM)); + Assert(!IOMIsLockWriteOwner(pVM)); int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY); AssertRCSuccess(rc); #endif @@ -294,7 +318,7 @@ VMMDECL(bool) EMRemIsLockOwner(PVM pVM) * @returns VBox status code * @param pVM Pointer to the VM. */ -VMMDECL(int) EMRemTryLock(PVM pVM) +VMM_INT_DECL(int) EMRemTryLock(PVM pVM) { #ifdef VBOX_WITH_REM if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM)) @@ -328,7 +352,7 @@ static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_ else if (cbToRead < cbMinRead) cbToRead = cbMinRead; -#if defined(IN_RC) || defined(IN_RING3) +#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3)) /* * We might be called upon to interpret an instruction in a patch. 
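
EMMonitorWaitShouldContinue above now clears the MWAIT flags only when an interrupt is actually pending, instead of unconditionally as the old EMShouldContinueAfterHalt did. The decision reduces to the predicate sketched here (flag bits and names invented for the example):

#include <stdint.h>
#include <stdbool.h>

#define MWAIT_ACTIVE        0x1   /* An MWAIT is in progress.                      */
#define MWAIT_BREAKIRQIF0   0x2   /* MWAIT was armed to break on IRQ even if IF=0. */

/* Should the CPU resume after MWAIT?  Only if interrupts can reach it
   (IF set, or the break-on-IRQ hint was given) and one is actually pending. */
static bool MWaitShouldContinue(bool fIf, uint32_t *pfMWait, bool fIrqPending)
{
    if (   fIf
        || (*pfMWait & (MWAIT_ACTIVE | MWAIT_BREAKIRQIF0)) == (MWAIT_ACTIVE | MWAIT_BREAKIRQIF0))
    {
        if (fIrqPending)
        {
            *pfMWait &= ~(MWAIT_ACTIVE | MWAIT_BREAKIRQIF0);   /* The wait is over. */
            return true;
        }
    }
    return false;
}
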
*/ @@ -377,9 +401,9 @@ static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_ */ if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) { - HWACCMInvalidatePage(pVCpu, uSrcAddr); + HMInvalidatePage(pVCpu, uSrcAddr); if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT)) - HWACCMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1); + HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1); } #endif } @@ -403,14 +427,13 @@ DECLINLINE(int) emDisCoreOne(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, RTGCUINTP * * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for * details. - * @retval VERR_EM_INTERNAL_DISAS_ERROR on DISCoreOneEx failure. * * @param pVM Pointer to the VM. * @param pVCpu Pointer to the VMCPU. * @param pDis Where to return the parsed instruction info. * @param pcbInstr Where to return the instruction size. (optional) */ -VMMDECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr) +VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr) { PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu)); RTGCPTR GCPtrInstr; @@ -437,7 +460,6 @@ VMMDECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, u * This is used by internally by the interpreter and by trap/access handlers. * * @returns VBox status code. - * @retval VERR_EM_INTERNAL_DISAS_ERROR on DISCoreOneEx failure. * * @param pVM Pointer to the VM. * @param pVCpu Pointer to the VMCPU. @@ -446,8 +468,8 @@ VMMDECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, u * @param pDis Where to return the parsed instruction info. * @param pcbInstr Where to return the instruction size. (optional) */ -VMMDECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore, - PDISCPUSTATE pDis, unsigned *pcbInstr) +VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore, + PDISCPUSTATE pDis, unsigned *pcbInstr) { Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu); @@ -456,8 +478,8 @@ VMMDECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr); if (RT_SUCCESS(rc)) return VINF_SUCCESS; - AssertMsgFailed(("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc)); - return VERR_EM_INTERNAL_DISAS_ERROR; + AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc)); + return rc; } @@ -679,7 +701,7 @@ static void emCompareWithIem(PVMCPU pVCpu, PCCPUMCTX pEmCtx, PCCPUMCTX pIemCtx, * Architecture System Developers Manual, Vol 3, 5.5) so we don't need * to worry about e.g. invalid modrm combinations (!) */ -VMMDECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault) +VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault)); @@ -808,7 +830,7 @@ VMMDECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFram * Architecture System Developers Manual, Vol 3, 5.5) so we don't need * to worry about e.g. invalid modrm combinations (!) 
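
The emReadBytes hunk above invalidates a second TLB page only when the instruction fetch crosses a page boundary; the check is a plain page-number comparison. In isolation (InvalidatePage is a hypothetical stand-in for HMInvalidatePage):

#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT 12   /* 4KB pages. */

/* True if a read of cb bytes starting at uAddr touches two pages. */
static bool ReadCrossesPage(uint64_t uAddr, uint32_t cb)
{
    return ((uAddr + cb - 1) >> PAGE_SHIFT) != (uAddr >> PAGE_SHIFT);
}

/* Invalidate the page(s) covering a failed fetch, mirroring the pattern above. */
static void InvalidateFetchedPages(uint64_t uAddr, uint32_t cb, void (*InvalidatePage)(uint64_t))
{
    InvalidatePage(uAddr);
    if (ReadCrossesPage(uAddr, cb))
        InvalidatePage(uAddr + cb - 1);
}
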
*/ -VMMDECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten) +VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten) { LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault)); Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); @@ -950,8 +972,8 @@ VMMDECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFr * @todo At this time we do NOT check if the instruction overwrites vital information. * Make sure this can't happen!! (will add some assertions/checks later) */ -VMMDECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, - RTGCPTR pvFault, EMCODETYPE enmCodeType) +VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, + RTGCPTR pvFault, EMCODETYPE enmCodeType) { LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault)); Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); @@ -1028,7 +1050,7 @@ VMMDECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTAT #endif } -#if defined(IN_RC) /*&& defined(VBOX_WITH_PATM)*/ +#ifdef IN_RC DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb) { @@ -1048,7 +1070,7 @@ DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void * @param pRegFrame The register frame. * */ -VMMDECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp; RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask; @@ -1094,7 +1116,74 @@ VMMDECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegF return VINF_SUCCESS; } -#endif /* IN_RC && VBOX_WITH_PATM */ +/** + * IRET Emulation. + */ +static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) +{ +#ifdef VBOX_WITH_RAW_RING1 + NOREF(pvFault); NOREF(pcbSize); + if (EMIsRawRing1Enabled(pVM)) + { + RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp; + RTGCUINTPTR eip, cs, esp, ss, eflags, uMask; + int rc; + uint32_t cpl, rpl; + + /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */ + /* @todo: we don't verify all the edge cases that generate #GP faults */ + + Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); + Assert(!CPUMIsGuestIn64BitCode(pVCpu)); + /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by + * this function. Fear that it may guru on us, thus not converted to + * IEM. */ + + rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4); + rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4); + rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4); + AssertRCReturn(rc, VERR_EM_INTERPRETER); + AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER); + + /* Deal with V86 above. 
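
The raw-ring-1 iret emulation above pops a 32-bit protected-mode iret frame: EIP, CS and EFLAGS are always read from ESP+0/+4/+8, and ESP plus SS from +12/+16 when the return changes privilege level (the popped CS.RPL differs from the current CPL). A sketch of the frame it reads; the structure is invented for the example, the offsets are architectural.

#include <stdint.h>
#include <stdbool.h>

/* 32-bit protected-mode iret frame as laid out on the stack at ESP. */
typedef struct IretFrame32
{
    uint32_t eip;       /* ESP + 0  */
    uint32_t cs;        /* ESP + 4  */
    uint32_t eflags;    /* ESP + 8  */
    uint32_t esp;       /* ESP + 12, present only when returning to an outer ring. */
    uint32_t ss;        /* ESP + 16, present only when returning to an outer ring. */
} IretFrame32;

/* Does this iret switch stacks?  Yes when the popped CS.RPL differs from CPL. */
static bool IretChangesPrivilege(const IretFrame32 *pFrame, uint32_t uCpl)
{
    return (pFrame->cs & 3) != uCpl;
}
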
*/ + if (eflags & X86_EFL_VM) + return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame); + + cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame); + rpl = cs & X86_SEL_RPL; + + Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags)); + if (rpl != cpl) + { + rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4); + rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4); + AssertRCReturn(rc, VERR_EM_INTERPRETER); + Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl)); + Log(("emInterpretIret: SS:ESP=%04X:08X\n", ss, esp)); + pRegFrame->ss.Sel = ss; + pRegFrame->esp = esp; + } + pRegFrame->cs.Sel = cs; + pRegFrame->eip = eip; + + /* Adjust CS & SS as required. */ + CPUMRCRecheckRawState(pVCpu, pRegFrame); + + /* Mask away all reserved bits */ + uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID; + eflags &= uMask; + + CPUMRawSetEFlags(pVCpu, eflags); + Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF); + return VINF_SUCCESS; + } +#else + NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize); +#endif + return VERR_EM_INTERPRETER; +} + +#endif /* IN_RC */ @@ -1118,7 +1207,7 @@ VMMDECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegF * @param pRegFrame The register frame. * */ -VMMDECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); uint32_t iLeaf = pRegFrame->eax; @@ -1146,7 +1235,7 @@ VMMDECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) * @param pRegFrame The register frame. * */ -VMMDECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); unsigned uCR4 = CPUMGetGuestCR4(pVCpu); @@ -1176,7 +1265,7 @@ VMMDECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) * @param pCtx The CPU context. * */ -VMMDECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) +VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) { Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu)); uint32_t uCR4 = CPUMGetGuestCR4(pVCpu); @@ -1214,7 +1303,7 @@ VMMDECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) * @param pRegFrame The register frame. * */ -VMMDECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); uint32_t uCR4 = CPUMGetGuestCR4(pVCpu); @@ -1241,7 +1330,7 @@ VMMDECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) /** * MWAIT Emulation. */ -VMMDECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures; @@ -1280,7 +1369,7 @@ VMMDECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegF /** * MONITOR Emulation. 
*/ -VMMDECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) { uint32_t u32Dummy, u32ExtFeatures, cpl; Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); @@ -1301,7 +1390,7 @@ VMMDECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)) return VERR_EM_INTERPRETER; /* not supported */ - EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx); + EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS); return VINF_SUCCESS; } @@ -1318,7 +1407,7 @@ VMMDECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) * @param pAddrGC Operand address. * */ -VMMDECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC) +VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC) { /** @todo is addr always a flat linear address or ds based * (in absence of segment override prefixes)???? @@ -1396,14 +1485,14 @@ static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t D if (pRegFrame->cs.Attr.n.u1Long) { AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n")); - return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */ + return VERR_EM_INTERPRETER; /** @todo generate #GP(0) */ } /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */ if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)) { AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n")); - return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */ + return VERR_EM_INTERPRETER; /** @todo generate #GP(0) */ } msrEFER |= MSR_K6_EFER_LMA; } @@ -1412,7 +1501,7 @@ static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t D && !(val & X86_CR0_PG)) { msrEFER &= ~MSR_K6_EFER_LMA; - /* @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */ + /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */ } CPUMSetGuestEFER(pVCpu, msrEFER); } @@ -1466,8 +1555,10 @@ static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t D VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); } # endif - if ((val ^ oldval) & X86_CR4_VME) +# ifdef VBOX_WITH_RAW_MODE + if (((val ^ oldval) & X86_CR4_VME) && !HMIsEnabled(pVM)) VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); +# endif rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu)); return rc2 == VINF_SUCCESS ? rc : rc2; @@ -1495,7 +1586,7 @@ static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t D * @param SrcRegGen General purpose register index (USE_REG_E**)) * */ -VMMDECL(int) EMInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen) +VMM_INT_DECL(int) EMInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen) { uint64_t val; int rc; @@ -1526,7 +1617,7 @@ VMMDECL(int) EMInterpretCRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, * @param u16Data LMSW source data. 
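
emUpdateCRx above keeps EFER.LMA consistent with CR0.PG: enabling paging while EFER.LME is set requires CR4.PAE and turns LMA on, while disabling paging turns LMA off. The rule in isolation, using the architectural bit masks (illustration only, with error handling reduced to a bool):

#include <stdint.h>
#include <stdbool.h>

#define CR0_PG    UINT64_C(0x80000000)
#define CR4_PAE   UINT64_C(0x00000020)
#define EFER_LME  UINT64_C(0x00000100)
#define EFER_LMA  UINT64_C(0x00000400)

/* Recompute EFER.LMA after a CR0 write.  Returns false for the #GP case
   (turning on paging in long mode without PAE). */
static bool UpdateLongModeActive(uint64_t uNewCr0, uint64_t uCr4, uint64_t *puEfer)
{
    if ((*puEfer & EFER_LME) && (uNewCr0 & CR0_PG))
    {
        if (!(uCr4 & CR4_PAE))
            return false;                 /* Illegal: long mode requires PAE paging. */
        *puEfer |= EFER_LMA;
    }
    else if ((*puEfer & EFER_LMA) && !(uNewCr0 & CR0_PG))
        *puEfer &= ~EFER_LMA;
    return true;
}
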
* */ -VMMDECL(int) EMInterpretLMSW(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint16_t u16Data) +VMM_INT_DECL(int) EMInterpretLMSW(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint16_t u16Data) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); uint64_t OldCr0 = CPUMGetGuestCR0(pVCpu); @@ -1547,7 +1638,7 @@ VMMDECL(int) EMInterpretLMSW(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint * @param pVCpu Pointer to the VMCPU. * */ -VMMDECL(int) EMInterpretCLTS(PVM pVM, PVMCPU pVCpu) +VMM_INT_DECL(int) EMInterpretCLTS(PVM pVM, PVMCPU pVCpu) { NOREF(pVM); @@ -1558,6 +1649,127 @@ VMMDECL(int) EMInterpretCLTS(PVM pVM, PVMCPU pVCpu) } +#ifdef LOG_ENABLED +static const char *emMSRtoString(uint32_t uMsr) +{ + switch (uMsr) + { + case MSR_IA32_APICBASE: return "MSR_IA32_APICBASE"; + case MSR_IA32_CR_PAT: return "MSR_IA32_CR_PAT"; + case MSR_IA32_SYSENTER_CS: return "MSR_IA32_SYSENTER_CS"; + case MSR_IA32_SYSENTER_EIP: return "MSR_IA32_SYSENTER_EIP"; + case MSR_IA32_SYSENTER_ESP: return "MSR_IA32_SYSENTER_ESP"; + case MSR_K6_EFER: return "MSR_K6_EFER"; + case MSR_K8_SF_MASK: return "MSR_K8_SF_MASK"; + case MSR_K6_STAR: return "MSR_K6_STAR"; + case MSR_K8_LSTAR: return "MSR_K8_LSTAR"; + case MSR_K8_CSTAR: return "MSR_K8_CSTAR"; + case MSR_K8_FS_BASE: return "MSR_K8_FS_BASE"; + case MSR_K8_GS_BASE: return "MSR_K8_GS_BASE"; + case MSR_K8_KERNEL_GS_BASE: return "MSR_K8_KERNEL_GS_BASE"; + case MSR_K8_TSC_AUX: return "MSR_K8_TSC_AUX"; + case MSR_IA32_BIOS_SIGN_ID: return "Unsupported MSR_IA32_BIOS_SIGN_ID"; + case MSR_IA32_PLATFORM_ID: return "Unsupported MSR_IA32_PLATFORM_ID"; + case MSR_IA32_BIOS_UPDT_TRIG: return "Unsupported MSR_IA32_BIOS_UPDT_TRIG"; + case MSR_IA32_TSC: return "MSR_IA32_TSC"; + case MSR_IA32_MISC_ENABLE: return "MSR_IA32_MISC_ENABLE"; + case MSR_IA32_MTRR_CAP: return "MSR_IA32_MTRR_CAP"; + case MSR_IA32_MCG_CAP: return "Unsupported MSR_IA32_MCG_CAP"; + case MSR_IA32_MCG_STATUS: return "Unsupported MSR_IA32_MCG_STATUS"; + case MSR_IA32_MCG_CTRL: return "Unsupported MSR_IA32_MCG_CTRL"; + case MSR_IA32_MTRR_DEF_TYPE: return "MSR_IA32_MTRR_DEF_TYPE"; + case MSR_K7_EVNTSEL0: return "Unsupported MSR_K7_EVNTSEL0"; + case MSR_K7_EVNTSEL1: return "Unsupported MSR_K7_EVNTSEL1"; + case MSR_K7_EVNTSEL2: return "Unsupported MSR_K7_EVNTSEL2"; + case MSR_K7_EVNTSEL3: return "Unsupported MSR_K7_EVNTSEL3"; + case MSR_IA32_MC0_CTL: return "Unsupported MSR_IA32_MC0_CTL"; + case MSR_IA32_MC0_STATUS: return "Unsupported MSR_IA32_MC0_STATUS"; + case MSR_IA32_PERFEVTSEL0: return "Unsupported MSR_IA32_PERFEVTSEL0"; + case MSR_IA32_PERFEVTSEL1: return "Unsupported MSR_IA32_PERFEVTSEL1"; + case MSR_IA32_PERF_STATUS: return "MSR_IA32_PERF_STATUS"; + case MSR_IA32_PLATFORM_INFO: return "MSR_IA32_PLATFORM_INFO"; + case MSR_IA32_PERF_CTL: return "Unsupported MSR_IA32_PERF_CTL"; + case MSR_K7_PERFCTR0: return "Unsupported MSR_K7_PERFCTR0"; + case MSR_K7_PERFCTR1: return "Unsupported MSR_K7_PERFCTR1"; + case MSR_K7_PERFCTR2: return "Unsupported MSR_K7_PERFCTR2"; + case MSR_K7_PERFCTR3: return "Unsupported MSR_K7_PERFCTR3"; + case MSR_IA32_PMC0: return "Unsupported MSR_IA32_PMC0"; + case MSR_IA32_PMC1: return "Unsupported MSR_IA32_PMC1"; + case MSR_IA32_PMC2: return "Unsupported MSR_IA32_PMC2"; + case MSR_IA32_PMC3: return "Unsupported MSR_IA32_PMC3"; + } + return "Unknown MSR"; +} +#endif /* LOG_ENABLED */ + + +/** + * Interpret RDMSR + * + * @returns VBox status code. + * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the VMCPU. + * @param pRegFrame The register frame. 
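
The RDMSR and WRMSR interpreters added below follow the architectural register convention: ECX selects the MSR, the 64-bit value travels split across EDX:EAX, and both instructions are privileged (CPL 0, otherwise #GP). The split and re-assembly in isolation:

#include <stdint.h>

/* RDMSR: split a 64-bit MSR value into the EDX:EAX register pair. */
static void SplitMsrValue(uint64_t uValue, uint32_t *pEax, uint32_t *pEdx)
{
    *pEax = (uint32_t)uValue;            /* Low half.  */
    *pEdx = (uint32_t)(uValue >> 32);    /* High half. */
}

/* WRMSR: reassemble the value the guest placed in EDX:EAX. */
static uint64_t JoinMsrValue(uint32_t uEax, uint32_t uEdx)
{
    return ((uint64_t)uEdx << 32) | uEax;
}
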
+ */ +VMM_INT_DECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +{ + NOREF(pVM); + + /* Get the current privilege level. */ + if (CPUMGetGuestCPL(pVCpu) != 0) + { + Log4(("EM: Refuse RDMSR: CPL != 0\n")); + return VERR_EM_INTERPRETER; /* supervisor only */ + } + + uint64_t uValue; + int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue); + if (RT_UNLIKELY(rc != VINF_SUCCESS)) + { + Assert(rc == VERR_CPUM_RAISE_GP_0); + Log4(("EM: Refuse RDMSR: rc=%Rrc\n", rc)); + return VERR_EM_INTERPRETER; + } + pRegFrame->rax = (uint32_t) uValue; + pRegFrame->rdx = (uint32_t)(uValue >> 32); + LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue)); + return rc; +} + + +/** + * Interpret WRMSR + * + * @returns VBox status code. + * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the VMCPU. + * @param pRegFrame The register frame. + */ +VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) +{ + Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); + + /* Check the current privilege level, this instruction is supervisor only. */ + if (CPUMGetGuestCPL(pVCpu) != 0) + { + Log4(("EM: Refuse WRMSR: CPL != 0\n")); + return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */ + } + + int rc = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)); + if (rc != VINF_SUCCESS) + { + Assert(rc == VERR_CPUM_RAISE_GP_0); + Log4(("EM: Refuse WRMSR: rc=%d\n", rc)); + return VERR_EM_INTERPRETER; + } + LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, + RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx))); + NOREF(pVM); + return rc; +} + + /** * Interpret CRx read. * @@ -1569,7 +1781,7 @@ VMMDECL(int) EMInterpretCLTS(PVM pVM, PVMCPU pVCpu) * @param SrcRegCRx CRx register index (DISUSE_REG_CR*) * */ -VMMDECL(int) EMInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx) +VMM_INT_DECL(int) EMInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegCrx) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); uint64_t val64; @@ -1602,26 +1814,37 @@ VMMDECL(int) EMInterpretCRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, u * @param SrcRegGen General purpose register index (USE_REG_E**)) * */ -VMMDECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen) +VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen) { Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); - uint64_t val; + uint64_t uNewDrX; int rc; NOREF(pVM); if (CPUMIsGuestIn64BitCode(pVCpu)) - rc = DISFetchReg64(pRegFrame, SrcRegGen, &val); + rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX); else { uint32_t val32; rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32); - val = val32; + uNewDrX = val32; } if (RT_SUCCESS(rc)) { + if (DestRegDrx == 6) + { + uNewDrX |= X86_DR6_RA1_MASK; + uNewDrX &= ~X86_DR6_RAZ_MASK; + } + else if (DestRegDrx == 7) + { + uNewDrX |= X86_DR7_RA1_MASK; + uNewDrX &= ~X86_DR7_RAZ_MASK; + } + /** @todo we don't fail if illegal bits are set/cleared for e.g. 
dr7 */ - rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, val); + rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX); if (RT_SUCCESS(rc)) return rc; AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx)); @@ -1641,7 +1864,7 @@ VMMDECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, * @param SrcRegDRx DRx register index (USE_REG_DR*) * */ -VMMDECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx) +VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx) { uint64_t val64; Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); @@ -2507,121 +2730,134 @@ static int emInterpretMov(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE if(RT_FAILURE(rc)) return VERR_EM_INTERPRETER; -#ifdef IN_RC - if (TRPMHasTrap(pVCpu)) + if (param1.type == DISQPV_TYPE_ADDRESS) { - if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW) - { -#else - /** @todo Make this the default and don't rely on TRPM information. */ - if (param1.type == DISQPV_TYPE_ADDRESS) - { -#endif - RTGCPTR pDest; - uint64_t val64; + RTGCPTR pDest; + uint64_t val64; - switch(param1.type) - { - case DISQPV_TYPE_IMMEDIATE: - if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64))) - return VERR_EM_INTERPRETER; - /* fallthru */ + switch(param1.type) + { + case DISQPV_TYPE_IMMEDIATE: + if(!(param1.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64))) + return VERR_EM_INTERPRETER; + /* fallthru */ - case DISQPV_TYPE_ADDRESS: - pDest = (RTGCPTR)param1.val.val64; - pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest); - break; + case DISQPV_TYPE_ADDRESS: + pDest = (RTGCPTR)param1.val.val64; + pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest); + break; - default: - AssertFailed(); - return VERR_EM_INTERPRETER; - } + default: + AssertFailed(); + return VERR_EM_INTERPRETER; + } - switch(param2.type) - { - case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */ - val64 = param2.val.val64; - break; + switch(param2.type) + { + case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */ + val64 = param2.val.val64; + break; - default: - Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip)); - return VERR_EM_INTERPRETER; - } + default: + Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip)); + return VERR_EM_INTERPRETER; + } #ifdef LOG_ENABLED - if (pDis->uCpuMode == DISCPUMODE_64BIT) - LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64)); - else - LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64)); + if (pDis->uCpuMode == DISCPUMODE_64BIT) + LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64)); + else + LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64)); #endif - Assert(param2.size <= 8 && param2.size > 0); - EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER); - rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size); - if (RT_FAILURE(rc)) - return VERR_EM_INTERPRETER; + Assert(param2.size <= 8 && param2.size > 0); + EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER); + rc = 
emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size); + if (RT_FAILURE(rc)) + return VERR_EM_INTERPRETER; - *pcbSize = param2.size; - } - else - { /* read fault */ - RTGCPTR pSrc; - uint64_t val64; + *pcbSize = param2.size; + } +#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1) + /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */ + else if ( param1.type == DISQPV_TYPE_REGISTER + && param2.type == DISQPV_TYPE_REGISTER) + { + AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER); + AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER); + AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER); - /* Source */ - switch(param2.type) - { - case DISQPV_TYPE_IMMEDIATE: - if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64))) - return VERR_EM_INTERPRETER; - /* fallthru */ + uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame); + uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl; - case DISQPV_TYPE_ADDRESS: - pSrc = (RTGCPTR)param2.val.val64; - pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc); - break; + Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS)); + switch (param1.size) + { + case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) uValCS); break; + case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break; + case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break; + default: + AssertFailed(); + return VERR_EM_INTERPRETER; + } + AssertRCReturn(rc, rc); + } +#endif + else + { /* read fault */ + RTGCPTR pSrc; + uint64_t val64; - default: + /* Source */ + switch(param2.type) + { + case DISQPV_TYPE_IMMEDIATE: + if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64))) return VERR_EM_INTERPRETER; - } + /* fallthru */ - Assert(param1.size <= 8 && param1.size > 0); - EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER); - rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size); - if (RT_FAILURE(rc)) - return VERR_EM_INTERPRETER; + case DISQPV_TYPE_ADDRESS: + pSrc = (RTGCPTR)param2.val.val64; + pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc); + break; - /* Destination */ - switch(param1.type) - { - case DISQPV_TYPE_REGISTER: - switch(param1.size) - { - case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break; - case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break; - case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break; - case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break; - default: - return VERR_EM_INTERPRETER; - } - if (RT_FAILURE(rc)) - return rc; - break; + default: + return VERR_EM_INTERPRETER; + } + + Assert(param1.size <= 8 && param1.size > 0); + EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER); + rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size); + if (RT_FAILURE(rc)) + return VERR_EM_INTERPRETER; + /* Destination */ + switch(param1.type) + { + case DISQPV_TYPE_REGISTER: + switch(param1.size) + { + case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg, (uint8_t) val64); break; + case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break; + case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break; + case 8: rc = 
DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break; default: return VERR_EM_INTERPRETER; } + if (RT_FAILURE(rc)) + return rc; + break; + + default: + return VERR_EM_INTERPRETER; + } #ifdef LOG_ENABLED - if (pDis->uCpuMode == DISCPUMODE_64BIT) - LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size)); - else - LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size)); + if (pDis->uCpuMode == DISCPUMODE_64BIT) + LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size)); + else + LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size)); #endif - } - return VINF_SUCCESS; -#ifdef IN_RC } - return VERR_EM_INTERPRETER; -#endif + return VINF_SUCCESS; } @@ -2871,7 +3107,6 @@ static int emInterpretCmpXchg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTX */ static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) { - Assert(pDis->uCpuMode != DISCPUMODE_64BIT); /** @todo check */ DISQPVPARAMVAL param1; NOREF(pvFault); @@ -2925,7 +3160,7 @@ static int emInterpretCmpXchg8b(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMC } -#ifdef IN_RC /** @todo test+enable for HWACCM as well. */ +#ifdef IN_RC /** @todo test+enable for HM as well. */ /** * [LOCK] XADD emulation. */ @@ -3016,16 +3251,6 @@ static int emInterpretXAdd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCOR /** - * IRET Emulation. - */ -static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) -{ - /* only allow direct calls to EMInterpretIret for now */ - NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize); - return VERR_EM_INTERPRETER; -} - -/** * WBINVD Emulation. 
*/ static int emInterpretWbInvd(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) @@ -3325,7 +3550,7 @@ static int emInterpretLIGdt(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCO static int emInterpretSti(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) { NOREF(pcbSize); - PPATMGCSTATE pGCState = PATMQueryGCState(pVM); + PPATMGCSTATE pGCState = PATMGetGCState(pVM); if(!pGCState) { @@ -3389,139 +3614,6 @@ static VBOXSTRICTRC emInterpretMWait(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, P } -#ifdef LOG_ENABLED -static const char *emMSRtoString(uint32_t uMsr) -{ - switch (uMsr) - { - case MSR_IA32_APICBASE: - return "MSR_IA32_APICBASE"; - case MSR_IA32_CR_PAT: - return "MSR_IA32_CR_PAT"; - case MSR_IA32_SYSENTER_CS: - return "MSR_IA32_SYSENTER_CS"; - case MSR_IA32_SYSENTER_EIP: - return "MSR_IA32_SYSENTER_EIP"; - case MSR_IA32_SYSENTER_ESP: - return "MSR_IA32_SYSENTER_ESP"; - case MSR_K6_EFER: - return "MSR_K6_EFER"; - case MSR_K8_SF_MASK: - return "MSR_K8_SF_MASK"; - case MSR_K6_STAR: - return "MSR_K6_STAR"; - case MSR_K8_LSTAR: - return "MSR_K8_LSTAR"; - case MSR_K8_CSTAR: - return "MSR_K8_CSTAR"; - case MSR_K8_FS_BASE: - return "MSR_K8_FS_BASE"; - case MSR_K8_GS_BASE: - return "MSR_K8_GS_BASE"; - case MSR_K8_KERNEL_GS_BASE: - return "MSR_K8_KERNEL_GS_BASE"; - case MSR_K8_TSC_AUX: - return "MSR_K8_TSC_AUX"; - case MSR_IA32_BIOS_SIGN_ID: - return "Unsupported MSR_IA32_BIOS_SIGN_ID"; - case MSR_IA32_PLATFORM_ID: - return "Unsupported MSR_IA32_PLATFORM_ID"; - case MSR_IA32_BIOS_UPDT_TRIG: - return "Unsupported MSR_IA32_BIOS_UPDT_TRIG"; - case MSR_IA32_TSC: - return "MSR_IA32_TSC"; - case MSR_IA32_MISC_ENABLE: - return "MSR_IA32_MISC_ENABLE"; - case MSR_IA32_MTRR_CAP: - return "MSR_IA32_MTRR_CAP"; - case MSR_IA32_MCP_CAP: - return "Unsupported MSR_IA32_MCP_CAP"; - case MSR_IA32_MCP_STATUS: - return "Unsupported MSR_IA32_MCP_STATUS"; - case MSR_IA32_MCP_CTRL: - return "Unsupported MSR_IA32_MCP_CTRL"; - case MSR_IA32_MTRR_DEF_TYPE: - return "MSR_IA32_MTRR_DEF_TYPE"; - case MSR_K7_EVNTSEL0: - return "Unsupported MSR_K7_EVNTSEL0"; - case MSR_K7_EVNTSEL1: - return "Unsupported MSR_K7_EVNTSEL1"; - case MSR_K7_EVNTSEL2: - return "Unsupported MSR_K7_EVNTSEL2"; - case MSR_K7_EVNTSEL3: - return "Unsupported MSR_K7_EVNTSEL3"; - case MSR_IA32_MC0_CTL: - return "Unsupported MSR_IA32_MC0_CTL"; - case MSR_IA32_MC0_STATUS: - return "Unsupported MSR_IA32_MC0_STATUS"; - case MSR_IA32_PERFEVTSEL0: - return "Unsupported MSR_IA32_PERFEVTSEL0"; - case MSR_IA32_PERFEVTSEL1: - return "Unsupported MSR_IA32_PERFEVTSEL1"; - case MSR_IA32_PERF_STATUS: - return "MSR_IA32_PERF_STATUS"; - case MSR_IA32_PLATFORM_INFO: - return "MSR_IA32_PLATFORM_INFO"; - case MSR_IA32_PERF_CTL: - return "Unsupported MSR_IA32_PERF_CTL"; - case MSR_K7_PERFCTR0: - return "Unsupported MSR_K7_PERFCTR0"; - case MSR_K7_PERFCTR1: - return "Unsupported MSR_K7_PERFCTR1"; - case MSR_K7_PERFCTR2: - return "Unsupported MSR_K7_PERFCTR2"; - case MSR_K7_PERFCTR3: - return "Unsupported MSR_K7_PERFCTR3"; - case MSR_IA32_PMC0: - return "Unsupported MSR_IA32_PMC0"; - case MSR_IA32_PMC1: - return "Unsupported MSR_IA32_PMC1"; - case MSR_IA32_PMC2: - return "Unsupported MSR_IA32_PMC2"; - case MSR_IA32_PMC3: - return "Unsupported MSR_IA32_PMC3"; - } - return "Unknown MSR"; -} -#endif /* LOG_ENABLED */ - - -/** - * Interpret RDMSR - * - * @returns VBox status code. - * @param pVM Pointer to the VM. - * @param pVCpu Pointer to the VMCPU. 
- * @param pRegFrame The register frame. - */ -VMMDECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) -{ - /** @todo According to the Intel manuals, there's a REX version of RDMSR that is slightly different. - * That version clears the high dwords of both RDX & RAX */ - NOREF(pVM); - - /* Get the current privilege level. */ - if (CPUMGetGuestCPL(pVCpu) != 0) - { - Log4(("EM: Refuse RDMSR: CPL != 0\n")); - return VERR_EM_INTERPRETER; /* supervisor only */ - } - - uint64_t uValue; - int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue); - if (RT_UNLIKELY(rc != VINF_SUCCESS)) - { - Assert(rc == VERR_CPUM_RAISE_GP_0); - Log4(("EM: Refuse RDMSR: rc=%Rrc\n", rc)); - return VERR_EM_INTERPRETER; - } - pRegFrame->rax = (uint32_t) uValue; - pRegFrame->rdx = (uint32_t)(uValue >> 32); - LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue)); - return rc; -} - - /** * RDMSR Emulation. */ @@ -3536,39 +3628,6 @@ static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCO /** - * Interpret WRMSR - * - * @returns VBox status code. - * @param pVM Pointer to the VM. - * @param pVCpu Pointer to the VMCPU. - * @param pRegFrame The register frame. - */ -VMMDECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) -{ - Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu)); - - /* Check the current privilege level, this instruction is supervisor only. */ - if (CPUMGetGuestCPL(pVCpu) != 0) - { - Log4(("EM: Refuse WRMSR: CPL != 0\n")); - return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */ - } - - int rc = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)); - if (rc != VINF_SUCCESS) - { - Assert(rc == VERR_CPUM_RAISE_GP_0); - Log4(("EM: Refuse WRMSR: rc=%d\n", rc)); - return VERR_EM_INTERPRETER; - } - LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, - RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx))); - NOREF(pVM); - return rc; -} - - -/** * WRMSR Emulation. */ static int emInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize) @@ -3598,12 +3657,20 @@ DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCP */ /* Get the current privilege level. 
*/ uint32_t cpl = CPUMGetGuestCPL(pVCpu); - if ( cpl != 0 - && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */ +#ifdef VBOX_WITH_RAW_RING1 + if ( !EMIsRawRing1Enabled(pVM) + || cpl > 1 + || pRegFrame->eflags.Bits.u2IOPL > cpl + ) +#endif { - Log(("WARNING: refusing instruction emulation for user-mode code!!\n")); - STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode)); - return VERR_EM_INTERPRETER; + if ( cpl != 0 + && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */ + { + Log(("WARNING: refusing instruction emulation for user-mode code!!\n")); + STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode)); + return VERR_EM_INTERPRETER; + } } } else @@ -3836,10 +3903,10 @@ DECLINLINE(VBOXSTRICTRC) emInterpretInstructionCPU(PVM pVM, PVMCPU pVCpu, PDISCP #ifdef IN_RC INTERPRET_CASE(OP_STI,Sti); INTERPRET_CASE(OP_XADD, XAdd); + INTERPRET_CASE(OP_IRET,Iret); #endif INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b); INTERPRET_CASE(OP_HLT,Hlt); - INTERPRET_CASE(OP_IRET,Iret); INTERPRET_CASE(OP_WBINVD,WbInvd); #ifdef VBOX_WITH_STATISTICS # ifndef IN_RC diff --git a/src/VBox/VMM/VMMAll/EMAllA.asm b/src/VBox/VMM/VMMAll/EMAllA.asm index bb1eee82..46c9d3f8 100644 --- a/src/VBox/VMM/VMMAll/EMAllA.asm +++ b/src/VBox/VMM/VMMAll/EMAllA.asm @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2006-2007 Oracle Corporation +; Copyright (C) 2006-2011 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/FTMAll.cpp b/src/VBox/VMM/VMMAll/FTMAll.cpp index e47c3fce..0712804d 100644 --- a/src/VBox/VMM/VMMAll/FTMAll.cpp +++ b/src/VBox/VMM/VMMAll/FTMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2010 Oracle Corporation + * Copyright (C) 2010-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -38,7 +38,7 @@ * @param pVM Pointer to the VM. * @param enmType Checkpoint type */ -VMMDECL(int) FTMSetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmType) +VMM_INT_DECL(int) FTMSetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmType) { if (!pVM->fFaultTolerantMaster) return VINF_SUCCESS; @@ -58,7 +58,7 @@ VMMDECL(int) FTMSetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmType) * * @param pVM Pointer to the VM. */ -VMMDECL(bool) FTMIsDeltaLoadSaveActive(PVM pVM) +VMM_INT_DECL(bool) FTMIsDeltaLoadSaveActive(PVM pVM) { return pVM->ftm.s.fDeltaLoadSaveActive; } diff --git a/src/VBox/VMM/VMMAll/HMAll.cpp b/src/VBox/VMM/VMMAll/HMAll.cpp new file mode 100644 index 00000000..eaa4b4e6 --- /dev/null +++ b/src/VBox/VMM/VMMAll/HMAll.cpp @@ -0,0 +1,449 @@ +/* $Id: HMAll.cpp $ */ +/** @file + * HM - All contexts. + */ + +/* + * Copyright (C) 2006-2013 Oracle Corporation + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License (GPL) as published by the Free Software + * Foundation, in version 2 as it comes in the "COPYING" file of the + * VirtualBox OSE distribution. VirtualBox OSE is distributed in the + * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. 
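
A condensed restatement of the CPL guard added to emInterpretInstructionCPU above, as a standalone sketch; fRawRing1Enabled stands in for EMIsRawRing1Enabled(pVM) and fIsRdtsc for the OP_RDTSC special case (these names are illustrative only, not VirtualBox APIs):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool exampleRefuseEmulation(bool fRawRing1Enabled, uint32_t uCpl, uint32_t uIopl, bool fIsRdtsc)
{
    /* With raw ring-1 active, ring-0/ring-1 guest code whose IOPL does not
       exceed its CPL is emulated like supervisor code... */
    if (fRawRing1Enabled && uCpl <= 1 && uIopl <= uCpl)
        return false;
    /* ...everything else keeps the old rule: user mode is refused unless the
       instruction is RDTSC (without VBOX_WITH_RAW_RING1 only this rule exists). */
    return uCpl != 0 && !fIsRdtsc;
}

int main(void)
{
    assert(!exampleRefuseEmulation(true,  1, 0, false));   /* raw ring-1 kernel code: emulate */
    assert( exampleRefuseEmulation(false, 3, 0, false));   /* plain ring-3 code: refuse */
    return 0;
}
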
+ */ + + +/******************************************************************************* +* Header Files * +*******************************************************************************/ +#define LOG_GROUP LOG_GROUP_HM +#include <VBox/vmm/hm.h> +#include <VBox/vmm/pgm.h> +#include "HMInternal.h" +#include <VBox/vmm/vm.h> +#include <VBox/vmm/hm_vmx.h> +#include <VBox/vmm/hm_svm.h> +#include <VBox/err.h> +#include <VBox/log.h> +#include <iprt/param.h> +#include <iprt/assert.h> +#include <iprt/asm.h> +#include <iprt/string.h> +#include <iprt/thread.h> +#include <iprt/x86.h> +#include <iprt/asm-amd64-x86.h> + + + +/** + * Checks whether HM (VT-x/AMD-V) is being used by this VM. + * + * @retval @c true if used. + * @retval @c false if software virtualization (raw-mode) is used. + * @param pVM The cross context VM structure. + * @sa HMIsEnabled, HMR3IsEnabled + * @internal + */ +VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM) +{ + Assert(pVM->fHMEnabledFixed); + return pVM->fHMEnabled; +} + + +/** + * Queues a page for invalidation + * + * @returns VBox status code. + * @param pVCpu Pointer to the VMCPU. + * @param GCVirt Page to invalidate + */ +static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt) +{ + /* Nothing to do if a TLB flush is already pending */ + if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)) + return; +#if 1 + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); + NOREF(GCVirt); +#else + /* Be very careful when activating this code! */ + if (iPage == RT_ELEMENTS(pVCpu->hm.s.TlbShootdown.aPages)) + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); + else + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); +#endif +} + +/** + * Invalidates a guest page + * + * @returns VBox status code. + * @param pVCpu Pointer to the VMCPU. + * @param GCVirt Page to invalidate + */ +VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt) +{ + STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual); +#ifdef IN_RING0 + PVM pVM = pVCpu->CTX_SUFF(pVM); + if (pVM->hm.s.vmx.fSupported) + return VMXR0InvalidatePage(pVM, pVCpu, GCVirt); + + Assert(pVM->hm.s.svm.fSupported); + return SVMR0InvalidatePage(pVM, pVCpu, GCVirt); + +#else + hmQueueInvlPage(pVCpu, GCVirt); + return VINF_SUCCESS; +#endif +} + +/** + * Flushes the guest TLB. + * + * @returns VBox status code. + * @param pVCpu Pointer to the VMCPU. + */ +VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu) +{ + LogFlow(("HMFlushTLB\n")); + + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); + STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual); + return VINF_SUCCESS; +} + +#ifdef IN_RING0 + +/** + * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used. + * + */ +static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2) +{ + NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2); + return; +} + +/** + * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED. + */ +static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu) +{ + uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits); + + STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x); + int rc = RTMpPokeCpu(idHostCpu); + STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x); + + /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall + back to a less efficient implementation (broadcast). */ + if (rc == VERR_NOT_SUPPORTED) + { + STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z); + /* synchronous. 
*/ + RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0); + STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z); + } + else + { + if (rc == VINF_SUCCESS) + STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z); + else + STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z); + +/** @todo If more than one CPU is going to be poked, we could optimize this + * operation by poking them first and wait afterwards. Would require + * recording who to poke and their current cWorldSwitchExits values, + * that's something not suitable for stack... So, pVCpu->hm.s.something + * then. */ + /* Spin until the VCPU has switched back (poking is async). */ + while ( ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush) + && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits)) + ASMNopPause(); + + if (rc == VINF_SUCCESS) + STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z); + else + STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z); + } +} + +#endif /* IN_RING0 */ +#ifndef IN_RC + +/** + * Poke an EMT so it can perform the appropriate TLB shootdowns. + * + * @param pVCpu The handle of the virtual CPU to poke. + * @param fAccountFlushStat Whether to account the call to + * StatTlbShootdownFlush or StatTlbShootdown. + */ +static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat) +{ + if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)) + { + if (fAccountFlushStat) + STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush); + else + STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); +#ifdef IN_RING0 + RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu; + if (idHostCpu != NIL_RTCPUID) + hmR0PokeCpu(pVCpu, idHostCpu); +#else + VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE); +#endif + } + else + STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual); +} + + +/** + * Invalidates a guest page on all VCPUs. + * + * @returns VBox status code. + * @param pVM Pointer to the VM. + * @param GCVirt Page to invalidate + */ +VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr) +{ + VMCPUID idCurCpu = VMMGetCpuId(pVM); + STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage); + + for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + { + PVMCPU pVCpu = &pVM->aCpus[idCpu]; + + /* Nothing to do if a TLB flush is already pending; the VCPU should + have already been poked if it were active. */ + if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)) + continue; + + if (pVCpu->idCpu == idCurCpu) + HMInvalidatePage(pVCpu, GCPtr); + else + { + hmQueueInvlPage(pVCpu, GCPtr); + hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */); + } + } + + return VINF_SUCCESS; +} + + +/** + * Flush the TLBs of all VCPUs. + * + * @returns VBox status code. + * @param pVM Pointer to the VM. + */ +VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM) +{ + if (pVM->cCpus == 1) + return HMFlushTLB(&pVM->aCpus[0]); + + VMCPUID idThisCpu = VMMGetCpuId(pVM); + + STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb); + + for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + { + PVMCPU pVCpu = &pVM->aCpus[idCpu]; + + /* Nothing to do if a TLB flush is already pending; the VCPU should + have already been poked if it were active. */ + if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)) + { + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); + if (idThisCpu != idCpu) + hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */); + } + } + + return VINF_SUCCESS; +} + +#endif /* !IN_RC */ + +/** + * Checks if nested paging is enabled + * + * @returns boolean + * @param pVM Pointer to the VM. 
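
The cross-VCPU invalidation in HMInvalidatePageOnAllVCpus above follows a simple pattern: mark every VCPU as needing a flush, but only poke the ones that may currently be running guest code. A self-contained sketch of that shape with stand-in types; EXAMPLEVCPU, examplePoke and the field names are illustrative, not VirtualBox structures:

#include <assert.h>
#include <stdbool.h>

typedef struct EXAMPLEVCPU
{
    bool fTlbFlushPending;  /* stands in for VMCPU_FF_TLB_FLUSH */
    bool fInGuest;          /* stands in for hm.s.fCheckedTLBFlush */
} EXAMPLEVCPU;

static unsigned g_cPokes = 0;
static void examplePoke(EXAMPLEVCPU *pVCpu) { (void)pVCpu; g_cPokes++; /* IPI in the real code */ }

static void exampleInvalidateOnAll(EXAMPLEVCPU *paVCpus, unsigned cCpus, unsigned idSelf)
{
    for (unsigned i = 0; i < cCpus; i++)
    {
        if (paVCpus[i].fTlbFlushPending)
            continue;                       /* will flush anyway, nothing to do */
        paVCpus[i].fTlbFlushPending = true;
        if (i != idSelf && paVCpus[i].fInGuest)
            examplePoke(&paVCpus[i]);       /* force a world switch so the flag is noticed */
    }
}

int main(void)
{
    EXAMPLEVCPU aCpus[3] = { { false, false }, { false, true }, { true, true } };
    exampleInvalidateOnAll(aCpus, 3, 0 /* calling VCPU */);
    assert(aCpus[0].fTlbFlushPending && aCpus[1].fTlbFlushPending && g_cPokes == 1);
    return 0;
}
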
+ */ +VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM) +{ + return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging; +} + +/** + * Return the shadow paging mode for nested paging/ept + * + * @returns shadow paging mode + * @param pVM Pointer to the VM. + */ +VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM) +{ + Assert(HMIsNestedPagingActive(pVM)); + if (pVM->hm.s.svm.fSupported) + return PGMMODE_NESTED; + + Assert(pVM->hm.s.vmx.fSupported); + return PGMMODE_EPT; +} + +/** + * Invalidates a guest page by physical address + * + * NOTE: Assumes the current instruction references this physical page though a virtual address!! + * + * @returns VBox status code. + * @param pVM Pointer to the VM. + * @param GCPhys Page to invalidate + */ +VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys) +{ + if (!HMIsNestedPagingActive(pVM)) + return VINF_SUCCESS; + +#ifdef IN_RING0 + if (pVM->hm.s.vmx.fSupported) + { + VMCPUID idThisCpu = VMMGetCpuId(pVM); + + for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + { + PVMCPU pVCpu = &pVM->aCpus[idCpu]; + + if (idThisCpu == idCpu) + { + /** @todo r=ramshankar: Intel does not support flushing by guest physical + * address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */ + VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys); + } + else + { + VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); + hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/); + } + } + return VINF_SUCCESS; + } + + /* AMD-V doesn't support invalidation with guest physical addresses; see + comment in SVMR0InvalidatePhysPage. */ + Assert(pVM->hm.s.svm.fSupported); +#else + NOREF(GCPhys); +#endif + + HMFlushTLBOnAllVCpus(pVM); + return VINF_SUCCESS; +} + +/** + * Checks if an interrupt event is currently pending. + * + * @returns Interrupt event pending state. + * @param pVM Pointer to the VM. + */ +VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM) +{ + PVMCPU pVCpu = VMMGetCpu(pVM); + return !!pVCpu->hm.s.Event.fPending; +} + + +/** + * Return the PAE PDPE entries. + * + * @returns Pointer to the PAE PDPE array. + * @param pVCpu Pointer to the VMCPU. + */ +VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu) +{ + return &pVCpu->hm.s.aPdpes[0]; +} + + +/** + * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode, + * incorrect code bytes may be fetched after a world-switch". + * + * @param pu32Family Where to store the CPU family (can be NULL). + * @param pu32Model Where to store the CPU model (can be NULL). + * @param pu32Stepping Where to store the CPU stepping (can be NULL). + * @returns true if the erratum applies, false otherwise. + */ +VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping) +{ + /* + * Erratum 170 which requires a forced TLB flush for each world switch: + * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors". + * + * All BH-G1/2 and DH-G1/2 models include a fix: + * Athlon X2: 0x6b 1/2 + * 0x68 1/2 + * Athlon 64: 0x7f 1 + * 0x6f 2 + * Sempron: 0x7f 1/2 + * 0x6f 2 + * 0x6c 2 + * 0x7c 2 + * Turion 64: 0x68 2 + */ + uint32_t u32Dummy; + uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily; + ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy); + u32BaseFamily = (u32Version >> 8) & 0xf; + u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0); + u32Model = ((u32Version >> 4) & 0xf); + u32Model = u32Model | ((u32BaseFamily == 0xf ? 
(u32Version >> 16) & 0x0f : 0) << 4); + u32Stepping = u32Version & 0xf; + + bool fErratumApplies = false; + if ( u32Family == 0xf + && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1) + && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2)) + { + fErratumApplies = true; + } + + if (pu32Family) + *pu32Family = u32Family; + if (pu32Model) + *pu32Model = u32Model; + if (pu32Stepping) + *pu32Stepping = u32Stepping; + + return fErratumApplies; +} + + +/** + * Sets or clears the single instruction flag. + * + * When set, HM will try its best to return to ring-3 after executing a single + * instruction. This can be used for debugging. See also + * EMR3HmSingleInstruction. + * + * @returns The old flag state. + * @param pVCpu Pointer to the cross context CPU structure of + * the calling EMT. + * @param fEnable The new flag state. + */ +VMM_INT_DECL(bool) HMSetSingleInstruction(PVMCPU pVCpu, bool fEnable) +{ + VMCPU_ASSERT_EMT(pVCpu); + bool fOld = pVCpu->hm.s.fSingleInstruction; + pVCpu->hm.s.fSingleInstruction = fEnable; + return fOld; +} + diff --git a/src/VBox/VMM/VMMAll/HWACCMAll.cpp b/src/VBox/VMM/VMMAll/HWACCMAll.cpp deleted file mode 100644 index 838e1510..00000000 --- a/src/VBox/VMM/VMMAll/HWACCMAll.cpp +++ /dev/null @@ -1,339 +0,0 @@ -/* $Id: HWACCMAll.cpp $ */ -/** @file - * HWACCM - All contexts. - */ - -/* - * Copyright (C) 2006-2012 Oracle Corporation - * - * This file is part of VirtualBox Open Source Edition (OSE), as - * available from http://www.virtualbox.org. This file is free software; - * you can redistribute it and/or modify it under the terms of the GNU - * General Public License (GPL) as published by the Free Software - * Foundation, in version 2 as it comes in the "COPYING" file of the - * VirtualBox OSE distribution. VirtualBox OSE is distributed in the - * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. - */ - - -/******************************************************************************* -* Header Files * -*******************************************************************************/ -#define LOG_GROUP LOG_GROUP_HWACCM -#include <VBox/vmm/hwaccm.h> -#include <VBox/vmm/pgm.h> -#include "HWACCMInternal.h" -#include <VBox/vmm/vm.h> -#include <VBox/vmm/hwacc_vmx.h> -#include <VBox/vmm/hwacc_svm.h> -#include <VBox/err.h> -#include <VBox/log.h> -#include <iprt/param.h> -#include <iprt/assert.h> -#include <iprt/asm.h> -#include <iprt/string.h> -#include <iprt/x86.h> - - -/** - * Queues a page for invalidation - * - * @returns VBox status code. - * @param pVCpu Pointer to the VMCPU. - * @param GCVirt Page to invalidate - */ -static void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt) -{ - /* Nothing to do if a TLB flush is already pending */ - if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)) - return; -#if 1 - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); - NOREF(GCVirt); -#else - Be very careful when activating this code! - if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages)) - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); - else - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); -#endif -} - -/** - * Invalidates a guest page - * - * @returns VBox status code. - * @param pVCpu Pointer to the VMCPU. 
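
Returning to HMAmdIsSubjectToErratum170() above: the family/model/stepping decode can be checked against a concrete CPUID leaf 1 EAX value. The value below is chosen to land on the "Athlon X2: 0x6b, stepping 1" entry of the fixed-model table in the comment; it is an illustration of the bit arithmetic, not a claim about any particular shipping CPU.

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t u32Version    = UINT32_C(0x00060fb1);   /* hypothetical CPUID.1:EAX */
    uint32_t u32BaseFamily = (u32Version >> 8) & 0xf;
    uint32_t u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? (u32Version >> 20) & 0x7f : 0);
    uint32_t u32Model      = ((u32Version >> 4) & 0xf)
                           | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    uint32_t u32Stepping   = u32Version & 0xf;

    assert(u32Family == 0x0f && u32Model == 0x6b && u32Stepping == 1);
    /* Family 0xf, model 0x6b, stepping >= 1 is on the fixed list above, so
       fErratumApplies stays false and no forced TLB flush per world switch
       is needed for such a part. */
    return 0;
}
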
- * @param GCVirt Page to invalidate - */ -VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt) -{ - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual); -#ifdef IN_RING0 - PVM pVM = pVCpu->CTX_SUFF(pVM); - if (pVM->hwaccm.s.vmx.fSupported) - return VMXR0InvalidatePage(pVM, pVCpu, GCVirt); - - Assert(pVM->hwaccm.s.svm.fSupported); - return SVMR0InvalidatePage(pVM, pVCpu, GCVirt); - -#else - hwaccmQueueInvlPage(pVCpu, GCVirt); - return VINF_SUCCESS; -#endif -} - -/** - * Flushes the guest TLB - * - * @returns VBox status code. - * @param pVCpu Pointer to the VMCPU. - */ -VMMDECL(int) HWACCMFlushTLB(PVMCPU pVCpu) -{ - LogFlow(("HWACCMFlushTLB\n")); - - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual); - return VINF_SUCCESS; -} - -#ifdef IN_RING0 - -/** - * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used. - * - */ -static DECLCALLBACK(void) hwaccmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2) -{ - NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2); - return; -} - -/** - * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED. - */ -static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu) -{ - uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits); - - STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x); - int rc = RTMpPokeCpu(idHostCpu); - STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x); - - /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall - back to a less efficient implementation (broadcast). */ - if (rc == VERR_NOT_SUPPORTED) - { - STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z); - /* synchronous. */ - RTMpOnSpecific(idHostCpu, hwaccmFlushHandler, 0, 0); - STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z); - } - else - { - if (rc == VINF_SUCCESS) - STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z); - else - STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z); - -/** @todo If more than one CPU is going to be poked, we could optimize this - * operation by poking them first and wait afterwards. Would require - * recording who to poke and their current cWorldSwitchExits values, - * that's something not suitable for stack... So, pVCpu->hm.s.something - * then. */ - /* Spin until the VCPU has switched back (poking is async). */ - while ( ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush) - && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits)) - ASMNopPause(); - - if (rc == VINF_SUCCESS) - STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z); - else - STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPokeFailed, z); - } -} - -#endif /* IN_RING0 */ -#ifndef IN_RC - -/** - * Poke an EMT so it can perform the appropriate TLB shootdowns. - * - * @param pVCpu The handle of the virtual CPU to poke. - * @param fAccountFlushStat Whether to account the call to - * StatTlbShootdownFlush or StatTlbShootdown. 
- */ -static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat) -{ - if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)) - { - if (fAccountFlushStat) - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush); - else - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown); -#ifdef IN_RING0 - RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu; - if (idHostCpu != NIL_RTCPUID) - hmR0PokeCpu(pVCpu, idHostCpu); -#else - VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE); -#endif - } - else - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual); -} - - -/** - * Invalidates a guest page on all VCPUs. - * - * @returns VBox status code. - * @param pVM Pointer to the VM. - * @param GCVirt Page to invalidate - */ -VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr) -{ - VMCPUID idCurCpu = VMMGetCpuId(pVM); - STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage); - - for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) - { - PVMCPU pVCpu = &pVM->aCpus[idCpu]; - - /* Nothing to do if a TLB flush is already pending; the VCPU should - have already been poked if it were active. */ - if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)) - continue; - - if (pVCpu->idCpu == idCurCpu) - HWACCMInvalidatePage(pVCpu, GCPtr); - else - { - hwaccmQueueInvlPage(pVCpu, GCPtr); - hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/); - } - } - - return VINF_SUCCESS; -} - - -/** - * Flush the TLBs of all VCPUs - * - * @returns VBox status code. - * @param pVM Pointer to the VM. - */ -VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM) -{ - if (pVM->cCpus == 1) - return HWACCMFlushTLB(&pVM->aCpus[0]); - - VMCPUID idThisCpu = VMMGetCpuId(pVM); - - STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hwaccm.s.StatFlushTLB); - - for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) - { - PVMCPU pVCpu = &pVM->aCpus[idCpu]; - - /* Nothing to do if a TLB flush is already pending; the VCPU should - have already been poked if it were active. */ - if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)) - { - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); - if (idThisCpu != idCpu) - hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/); - } - } - - return VINF_SUCCESS; -} - -#endif /* !IN_RC */ - -/** - * Checks if nested paging is enabled - * - * @returns boolean - * @param pVM Pointer to the VM. - */ -VMMDECL(bool) HWACCMIsNestedPagingActive(PVM pVM) -{ - return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.fNestedPaging; -} - -/** - * Return the shadow paging mode for nested paging/ept - * - * @returns shadow paging mode - * @param pVM Pointer to the VM. - */ -VMMDECL(PGMMODE) HWACCMGetShwPagingMode(PVM pVM) -{ - Assert(HWACCMIsNestedPagingActive(pVM)); - if (pVM->hwaccm.s.svm.fSupported) - return PGMMODE_NESTED; - - Assert(pVM->hwaccm.s.vmx.fSupported); - return PGMMODE_EPT; -} - -/** - * Invalidates a guest page by physical address - * - * NOTE: Assumes the current instruction references this physical page though a virtual address!! - * - * @returns VBox status code. - * @param pVM Pointer to the VM. 
- * @param GCPhys Page to invalidate - */ -VMMDECL(int) HWACCMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys) -{ - if (!HWACCMIsNestedPagingActive(pVM)) - return VINF_SUCCESS; - -#ifdef IN_RING0 - if (pVM->hwaccm.s.vmx.fSupported) - { - VMCPUID idThisCpu = VMMGetCpuId(pVM); - - for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) - { - PVMCPU pVCpu = &pVM->aCpus[idCpu]; - - if (idThisCpu == idCpu) - VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys); - else - { - VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); - hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/); - } - } - return VINF_SUCCESS; - } - - /* AMD-V doesn't support invalidation with guest physical addresses; see - comment in SVMR0InvalidatePhysPage. */ - Assert(pVM->hwaccm.s.svm.fSupported); -#else - NOREF(GCPhys); -#endif - - HWACCMFlushTLBOnAllVCpus(pVM); - return VINF_SUCCESS; -} - -/** - * Checks if an interrupt event is currently pending. - * - * @returns Interrupt event pending state. - * @param pVM Pointer to the VM. - */ -VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM) -{ - PVMCPU pVCpu = VMMGetCpu(pVM); - return !!pVCpu->hwaccm.s.Event.fPending; -} - diff --git a/src/VBox/VMM/VMMAll/IEMAll.cpp b/src/VBox/VMM/VMMAll/IEMAll.cpp index 458376e1..8f4c26a7 100644 --- a/src/VBox/VMM/VMMAll/IEMAll.cpp +++ b/src/VBox/VMM/VMMAll/IEMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2011-2012 Oracle Corporation + * Copyright (C) 2011-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -82,14 +82,20 @@ #define LOG_GROUP LOG_GROUP_IEM #include <VBox/vmm/iem.h> #include <VBox/vmm/cpum.h> +#include <VBox/vmm/pdm.h> #include <VBox/vmm/pgm.h> #include <internal/pgm.h> #include <VBox/vmm/iom.h> #include <VBox/vmm/em.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/tm.h> #include <VBox/vmm/dbgf.h> +#include <VBox/vmm/dbgftrace.h> #ifdef VBOX_WITH_RAW_MODE_NOT_R0 # include <VBox/vmm/patm.h> +# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES) +# include <VBox/vmm/csam.h> +# endif #endif #include "IEMInternal.h" #ifdef IEM_VERIFICATION_MODE_FULL @@ -100,11 +106,14 @@ #include <VBox/log.h> #include <VBox/err.h> #include <VBox/param.h> +#include <VBox/dis.h> +#include <VBox/disopcode.h> #include <iprt/assert.h> #include <iprt/string.h> #include <iprt/x86.h> + /******************************************************************************* * Structures and Typedefs * *******************************************************************************/ @@ -125,7 +134,7 @@ #if defined(__GNUC__) && defined(RT_ARCH_X86) typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu); # define FNIEMOP_DEF(a_Name) \ - static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu) + static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu) # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ @@ -202,7 +211,7 @@ typedef IEMSELDESC *PIEMSELDESC; #ifdef LOG_ENABLED # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \ do { \ - Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \ + /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \ return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \ } while (0) #else @@ -219,7 +228,8 @@ typedef 
IEMSELDESC *PIEMSELDESC; #ifdef LOG_ENABLED # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \ do { \ - LogFunc(a_LoggerArgs); \ + LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \ + /*LogFunc(a_LoggerArgs);*/ \ return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \ } while (0) #else @@ -299,19 +309,31 @@ typedef IEMSELDESC *PIEMSELDESC; || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) ) /** + * Checks if an Intel CPUID feature is present. + */ +#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \ + ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) ) + +/** + * Checks if an Intel CPUID feature is present in the host CPU. + */ +#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \ + ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx ) + +/** * Evaluates to true if we're presenting an Intel CPU to the guest. */ -#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determin this once and store it the CPU structure */ +#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL ) /** * Evaluates to true if we're presenting an AMD CPU to the guest. */ -#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determin this once and store it the CPU structure */ +#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD ) /** * Check if the address is canonical. */ -#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000)) +#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr) /******************************************************************************* @@ -635,6 +657,34 @@ static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd = }; +/** Function table for the PUNPCKLBW instruction */ +static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 }; +/** Function table for the PUNPCKLBD instruction */ +static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 }; +/** Function table for the PUNPCKLDQ instruction */ +static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 }; +/** Function table for the PUNPCKLQDQ instruction */ +static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 }; + +/** Function table for the PUNPCKHBW instruction */ +static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 }; +/** Function table for the PUNPCKHBD instruction */ +static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 }; +/** Function table for the PUNPCKHDQ instruction */ +static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 }; +/** Function table for the PUNPCKHQDQ instruction */ +static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 }; + +/** Function table for the PXOR instruction */ +static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 }; +/** Function table for the PCMPEQB instruction */ +static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 }; +/** Function table for the PCMPEQW instruction */ +static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 }; +/** Function table for the PCMPEQD instruction */ +static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 
}; + + #if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES) /** What IEM just wrote. */ uint8_t g_abIemWrote[256]; @@ -647,6 +697,8 @@ size_t g_cbIemWrote; * Internal Functions * *******************************************************************************/ static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu); +static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu); +static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel); /*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/ static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel); static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr); @@ -663,9 +715,11 @@ static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess); static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); +static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); +static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); -static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel); +static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt); static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp); static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp); static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel); @@ -678,6 +732,7 @@ static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue); + /** * Sets the pass up status. * @@ -693,7 +748,7 @@ static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp) int32_t const rcOldPassUp = pIemCpu->rcPassUp; if (rcOldPassUp == VINF_SUCCESS) pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp); - /* If both are EM scheduling code, use EM priority rules. */ + /* If both are EM scheduling codes, use EM priority rules. */ else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST) { @@ -719,6 +774,67 @@ static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp) /** + * Initializes the execution state. + * + * @param pIemCpu The per CPU IEM state. + * @param fBypassHandlers Whether to bypass access handlers. 
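
iemInitExec() below derives the IEM execution mode from the guest state: 64-bit when the vCPU is executing long-mode code, otherwise 32- or 16-bit depending on the CS descriptor's default-size (D) bit. A condensed standalone sketch of just that decision; the two flag parameters stand in for CPUMIsGuestIn64BitCodeEx() and pCtx->cs.Attr.n.u1DefBig:

#include <assert.h>
#include <stdbool.h>

typedef enum EXAMPLEMODE { EXAMPLE_16BIT, EXAMPLE_32BIT, EXAMPLE_64BIT } EXAMPLEMODE;

static EXAMPLEMODE exampleSelectMode(bool fLongModeCode, bool fCsDefBig)
{
    if (fLongModeCode)
        return EXAMPLE_64BIT;                          /* IEMMODE_64BIT */
    return fCsDefBig ? EXAMPLE_32BIT : EXAMPLE_16BIT;  /* IEMMODE_32BIT : IEMMODE_16BIT */
}

int main(void)
{
    assert(exampleSelectMode(true,  false) == EXAMPLE_64BIT);  /* long-mode code segment */
    assert(exampleSelectMode(false, true)  == EXAMPLE_32BIT);  /* protected mode, D=1 */
    assert(exampleSelectMode(false, false) == EXAMPLE_16BIT);  /* real mode / 16-bit segment */
    return 0;
}
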
+ */ +DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers) +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); + +#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0)) + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr)); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr)); +#endif + +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 + CPUMGuestLazyLoadHiddenCsAndSs(pVCpu); +#endif + pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu); + IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx) + ? IEMMODE_64BIT + : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */ + ? IEMMODE_32BIT + : IEMMODE_16BIT; + pIemCpu->enmCpuMode = enmMode; +#ifdef VBOX_STRICT + pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe; + pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe; + pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe; + pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe; + pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef; + pIemCpu->uRexReg = 127; + pIemCpu->uRexB = 127; + pIemCpu->uRexIndex = 127; + pIemCpu->iEffSeg = 127; + pIemCpu->offOpcode = 127; + pIemCpu->cbOpcode = 127; +#endif + + pIemCpu->cActiveMappings = 0; + pIemCpu->iNextMapping = 0; + pIemCpu->rcPassUp = VINF_SUCCESS; + pIemCpu->fBypassHandlers = fBypassHandlers; +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 + pIemCpu->fInPatchCode = pIemCpu->uCpl == 0 + && pCtx->cs.u64Base == 0 + && pCtx->cs.u32Limit == UINT32_MAX + && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip); + if (!pIemCpu->fInPatchCode) + CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS); +#endif +} + + +/** * Initializes the decoder state. * * @param pIemCpu The per CPU IEM state. @@ -756,8 +872,16 @@ DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers) pIemCpu->enmCpuMode = enmMode; pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */ pIemCpu->enmEffAddrMode = enmMode; - pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */ - pIemCpu->enmEffOpSize = enmMode; + if (enmMode != IEMMODE_64BIT) + { + pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... 
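
In the 64-bit branch just below, the default operand size is deliberately set to 32-bit even though the default address size stays 64-bit: without a REX.W prefix, 89 d8 is mov eax, ebx, and only 48 89 d8 operates on the full registers. The companion architectural rule, that a 32-bit register write zero-extends into the upper half, sketched in plain C:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t uRax = UINT64_C(0xffffffffffffffff);
    uint32_t uEbx = UINT32_C(0x12345678);

    uRax = uEbx;    /* models a 32-bit operand-size write: the upper 32 bits are cleared */
    assert(uRax == UINT64_C(0x0000000012345678));
    return 0;
}
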
*/ + pIemCpu->enmEffOpSize = enmMode; + } + else + { + pIemCpu->enmDefOpSize = IEMMODE_32BIT; + pIemCpu->enmEffOpSize = IEMMODE_32BIT; + } pIemCpu->fPrefixes = 0; pIemCpu->uRexReg = 0; pIemCpu->uRexB = 0; @@ -769,7 +893,29 @@ DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers) pIemCpu->iNextMapping = 0; pIemCpu->rcPassUp = VINF_SUCCESS; pIemCpu->fBypassHandlers = fBypassHandlers; +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 + pIemCpu->fInPatchCode = pIemCpu->uCpl == 0 + && pCtx->cs.u64Base == 0 + && pCtx->cs.u32Limit == UINT32_MAX + && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip); + if (!pIemCpu->fInPatchCode) + CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS); +#endif +#ifdef DBGFTRACE_ENABLED + switch (enmMode) + { + case IEMMODE_64BIT: + RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip); + break; + case IEMMODE_32BIT: + RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip); + break; + case IEMMODE_16BIT: + RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip); + break; + } +#endif } @@ -810,25 +956,26 @@ static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypa if (GCPtrPC32 > pCtx->cs.u32Limit) return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1; + if (!cbToTryRead) /* overflowed */ + { + Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX); + cbToTryRead = UINT32_MAX; + } GCPtrPC = pCtx->cs.u64Base + GCPtrPC32; } -#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE) +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 /* Allow interpretation of patch manager code blocks since they can for instance throw #PFs for perfectly good reasons. */ - if ( (pCtx->cs.Sel & X86_SEL_RPL) == 1 - && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC)) + if (pIemCpu->fInPatchCode) { - uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); - if (cbToTryRead > cbLeftOnPage) - cbToTryRead = cbLeftOnPage; - if (cbToTryRead > sizeof(pIemCpu->abOpcode)) - cbToTryRead = sizeof(pIemCpu->abOpcode); - memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead); - pIemCpu->cbOpcode = cbToTryRead; + size_t cbRead = 0; + int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead); + AssertRCReturn(rc, rc); + pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0); return VINF_SUCCESS; } -#endif +#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ RTGCPHYS GCPhys; uint64_t fFlags; @@ -874,25 +1021,38 @@ static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypa /* * Read the bytes at this address. */ - uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); - if (cbToTryRead > cbLeftOnPage) - cbToTryRead = cbLeftOnPage; - if (cbToTryRead > sizeof(pIemCpu->abOpcode)) - cbToTryRead = sizeof(pIemCpu->abOpcode); - /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be - * doing that. 
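
The overflow guard added above covers exactly one case: a flat expand-up code segment (limit 0xffffffff) with the fetch starting at linear address 0, where the 32-bit expression u32Limit - GCPtrPC32 + 1 wraps to zero. The same guard is applied to GCPtrNext32 in iemOpcodeFetchMoreBytes further down. A small standalone check of the arithmetic and the fix-up:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t u32Limit    = UINT32_C(0xffffffff);       /* flat 4 GiB code segment */
    uint32_t GCPtrPC32   = 0;                          /* fetching from linear 0 */
    uint32_t cbToTryRead = u32Limit - GCPtrPC32 + 1;   /* 0x100000000 truncated to 0 */

    assert(cbToTryRead == 0);
    if (!cbToTryRead)                                  /* the guard added above */
        cbToTryRead = UINT32_MAX;
    assert(cbToTryRead == UINT32_MAX);
    return 0;
}
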
*/ - if (!pIemCpu->fBypassHandlers) - rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead); + PVM pVM = IEMCPU_TO_VM(pIemCpu); +#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0) + size_t cbActual; + if ( PATMIsEnabled(pVM) + && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual))) + { + Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC)); + Assert(cbActual > 0); + pIemCpu->cbOpcode = (uint8_t)cbActual; + } else - rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead); - if (rc != VINF_SUCCESS) +#endif { - /** @todo status code handling */ - Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n", - GCPtrPC, GCPhys, rc, cbToTryRead)); - return rc; + uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); + if (cbToTryRead > cbLeftOnPage) + cbToTryRead = cbLeftOnPage; + if (cbToTryRead > sizeof(pIemCpu->abOpcode)) + cbToTryRead = sizeof(pIemCpu->abOpcode); + + if (!pIemCpu->fBypassHandlers) + rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead); + else + rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead); + if (rc != VINF_SUCCESS) + { + /** @todo status code handling */ + Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n", + GCPtrPC, GCPhys, rc, cbToTryRead)); + return rc; + } + pIemCpu->cbOpcode = cbToTryRead; } - pIemCpu->cbOpcode = cbToTryRead; return VINF_SUCCESS; } @@ -904,7 +1064,8 @@ static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypa * * @returns Strict VBox status code. * @param pIemCpu The IEM state. - * @param cbMin Where to return the opcode byte. + * @param cbMin The minimum number of bytes relative offOpcode + * that must be read. */ static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin) { @@ -923,8 +1084,6 @@ static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin) GCPtrNext = pCtx->rip + pIemCpu->cbOpcode; if (!IEM_IS_CANONICAL(GCPtrNext)) return iemRaiseGeneralProtectionFault0(pIemCpu); - cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK); - Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */ } else { @@ -934,11 +1093,39 @@ static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin) if (GCPtrNext32 > pCtx->cs.u32Limit) return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1; + if (!cbToTryRead) /* overflowed */ + { + Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX); + cbToTryRead = UINT32_MAX; + /** @todo check out wrapping around the code segment. */ + } if (cbToTryRead < cbMin - cbLeft) return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); GCPtrNext = pCtx->cs.u64Base + GCPtrNext32; } + /* Only read up to the end of the page, and make sure we don't read more + than the opcode buffer can hold. */ + uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK); + if (cbToTryRead > cbLeftOnPage) + cbToTryRead = cbLeftOnPage; + if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode) + cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode; + Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. 
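
IEM_IS_CANONICAL(), used on GCPtrNext above, is now defined via X86_IS_CANONICAL(); the underlying test is the usual one: bits 63 down to 47 of the address must all carry the same value (a sign extension of bit 47). The branch-free form the old macro body spelled out can be checked against the boundary addresses; the wrap-around for upper-half addresses is well-defined unsigned arithmetic.

#include <stdint.h>
#include <assert.h>

static int exampleIsCanonical(uint64_t uAddr)
{
    /* Adds half of the non-canonical hole; canonical addresses then fall
       inside one contiguous 48-bit span. */
    return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
}

int main(void)
{
    assert( exampleIsCanonical(UINT64_C(0x00007fffffffffff)));  /* top of the lower half */
    assert( exampleIsCanonical(UINT64_C(0xffff800000000000)));  /* bottom of the upper half */
    assert(!exampleIsCanonical(UINT64_C(0x0000800000000000)));  /* inside the hole -> #GP(0) */
    return 0;
}
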
*/ + +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 + /* Allow interpretation of patch manager code blocks since they can for + instance throw #PFs for perfectly good reasons. */ + if (pIemCpu->fInPatchCode) + { + size_t cbRead = 0; + int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead); + AssertRCReturn(rc, rc); + pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0); + return VINF_SUCCESS; + } +#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ + RTGCPHYS GCPhys; uint64_t fFlags; int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys); @@ -965,13 +1152,11 @@ static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin) /* * Read the bytes at this address. + * + * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already, + * and since PATM should only patch the start of an instruction there + * should be no need to check again here. */ - uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK); - if (cbToTryRead > cbLeftOnPage) - cbToTryRead = cbLeftOnPage; - if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode) - cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode; - Assert(cbToTryRead >= cbMin - cbLeft); if (!pIemCpu->fBypassHandlers) rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead); else @@ -1742,14 +1927,21 @@ static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL interrupts with SS=0 in long mode). */ if (!(NewSS & X86_SEL_MASK_OFF_RPL)) { - Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #GP(0)\n", NewSS)); - return iemRaiseGeneralProtectionFault0(pIemCpu); + Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #TS(0)\n", NewSS)); + return iemRaiseTaskSwitchFault0(pIemCpu); + } + + /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */ + if ((NewSS & X86_SEL_RPL) != uCpl) + { + Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl)); + return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS); } /* * Read the descriptor. */ - VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS); + VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1758,26 +1950,20 @@ static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL */ if (!pDesc->Legacy.Gen.u1DescType) { - Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type)); - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS); + Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type)); + return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS); } if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) ) { - Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type)); - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS); - } - /** @todo testcase: check if the TSS.ssX RPL is checked. 
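 * (Aside, not part of the patch: a stack selector fetched from the TSS during a
 * privilege-level change reports its problems as #TS with the selector as error code
 * rather than #GP, since the bad value originates from the task state segment and not
 * from the instruction stream.)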
*/ - if ((NewSS & X86_SEL_RPL) != uCpl) - { - Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl)); - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS); + Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type)); + return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS); } if (pDesc->Legacy.Gen.u2Dpl != uCpl) { - Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl)); - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS); + Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl)); + return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS); } /* Is it there? */ @@ -1844,7 +2030,8 @@ static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL #define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0) /** External interrupt (from PIC, APIC, whatever). */ #define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1) -/** Software interrupt (int, into or bound). */ +/** Software interrupt (int or into, not bound). + * Returns to the following instruction */ #define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2) /** Takes an error code. */ #define IEM_XCPT_FLAGS_ERR RT_BIT_32(3) @@ -1852,8 +2039,11 @@ static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL #define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4) /** Generated by the breakpoint instruction. */ #define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5) +/** Generated by a DRx instruction breakpoint and RF should be cleared. */ +#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6) /** @} */ + /** * Loads the specified stack far pointer from the TSS. * @@ -1930,6 +2120,40 @@ static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pC /** + * Loads the specified stack pointer from the 64-bit TSS. + * + * @returns VBox strict status code. + * @param pIemCpu The IEM per CPU instance data. + * @param pCtx The CPU context. + * @param uCpl The CPL to load the stack for. + * @param uIst The interrupt stack table index, 0 if to use uCpl. + * @param puRsp Where to return the new stack pointer. + */ +static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, + uint64_t *puRsp) +{ + Assert(uCpl < 4); + Assert(uIst < 8); + *puRsp = 0; /* make gcc happy */ + + AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2); + + uint32_t off; + if (uIst) + off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1); + else + off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0); + if (off + sizeof(uint64_t) > pCtx->tr.u32Limit) + { + Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit)); + return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu); + } + + return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off); +} + + +/** * Adjust the CPU state according to the exception being raised. * * @param pCtx The CPU context. @@ -1998,7 +2222,7 @@ iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu, uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx); pu16Frame[2] = (uint16_t)fEfl; pu16Frame[1] = (uint16_t)pCtx->cs.Sel; - pu16Frame[0] = pCtx->ip + cbInstr; + pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 
pCtx->ip + cbInstr : pCtx->ip; rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp); if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) return rcStrict; @@ -2025,6 +2249,32 @@ iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu, /** + * Loads a NULL data selector into when coming from V8086 mode. + * + * @param pIemCpu The IEM per CPU instance data. + * @param pSReg Pointer to the segment register. + */ +static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg) +{ + pSReg->Sel = 0; + pSReg->ValidSel = 0; + if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + { + /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */ + pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D; + pSReg->Attr.u |= X86DESCATTR_UNUSABLE; + } + else + { + pSReg->fFlags = CPUMSELREG_FLAGS_VALID; + /** @todo check this on AMD-V */ + pSReg->u64Base = 0; + pSReg->u32Limit = 0; + } +} + + +/** * Implements exceptions and interrupts for protected mode. * * @returns VBox strict status code. @@ -2074,6 +2324,7 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); } + uint8_t f32BitGate = true; uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM; switch (Idte.Gate.u4Type) { @@ -2096,6 +2347,7 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, } case X86_SEL_TYPE_SYS_286_INT_GATE: + f32BitGate = false; case X86_SEL_TYPE_SYS_386_INT_GATE: fEflToClear |= X86_EFL_IF; break; @@ -2105,6 +2357,7 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, AssertFailedReturn(VERR_NOT_SUPPORTED); case X86_SEL_TYPE_SYS_286_TRAP_GATE: + f32BitGate = false; case X86_SEL_TYPE_SYS_386_TRAP_GATE: break; @@ -2138,7 +2391,7 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, /* Fetch the descriptor for the new CS. */ IEMSELDESC DescCS; - rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS); + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */ if (rcStrict != VINF_SUCCESS) { Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict))); @@ -2168,7 +2421,13 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl)); return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL); } - /** @todo is the RPL of the interrupt/trap gate descriptor checked? */ + + /* Make sure the selector is present. */ + if (!DescCS.Legacy.Gen.u1Present) + { + Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS)); + return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS); + } /* Check the new EIP against the new CS limit. 
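 * (Aside, not part of the patch: a 286 interrupt/trap gate only carries a 16-bit target
 * offset, so the EIP calculation below uses u16OffsetLow alone for those; 386 gates
 * combine the low and high offset words into a full 32-bit EIP.)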
*/ uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE @@ -2178,25 +2437,31 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy); if (uNewEip > cbLimitCS) { - Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n", - u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl)); - return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL); + Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n", + u8Vector, uNewEip, cbLimitCS, NewCS)); + return iemRaiseGeneralProtectionFault(pIemCpu, 0); } - /* Make sure the selector is present. */ - if (!DescCS.Legacy.Gen.u1Present) + /* Calc the flag image to push. */ + uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx); + if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) + fEfl &= ~X86_EFL_RF; + else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */ + + /* From V8086 mode only go to CPL 0. */ + uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF + ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl; + if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */ { - Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS)); - return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS); + Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl)); + return iemRaiseGeneralProtectionFault(pIemCpu, 0); } /* * If the privilege level changes, we need to get a new stack from the TSS. * This in turns means validating the new SS and ESP... */ - uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx); - uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF - ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl; if (uNewCpl != pIemCpu->uCpl) { RTSEL NewSS; @@ -2217,7 +2482,9 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */ } - uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20; + uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM) + ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate + : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate; if ( uNewEsp - 1 > cbLimitSS || uNewEsp < cbStackFrame) { @@ -2237,15 +2504,42 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, if (rcStrict != VINF_SUCCESS) return rcStrict; void * const pvStackFrame = uStackFrame.pv; - - if (fFlags & IEM_XCPT_FLAGS_ERR) - *uStackFrame.pu32++ = uErr; - uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT - ? pCtx->eip + cbInstr : pCtx->eip; - uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; - uStackFrame.pu32[2] = fEfl; - uStackFrame.pu32[3] = pCtx->esp; - uStackFrame.pu32[4] = pCtx->ss.Sel; + if (f32BitGate) + { + if (fFlags & IEM_XCPT_FLAGS_ERR) + *uStackFrame.pu32++ = uErr; + uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 
pCtx->eip + cbInstr : pCtx->eip; + uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; + uStackFrame.pu32[2] = fEfl; + uStackFrame.pu32[3] = pCtx->esp; + uStackFrame.pu32[4] = pCtx->ss.Sel; + if (fEfl & X86_EFL_VM) + { + uStackFrame.pu32[1] = pCtx->cs.Sel; + uStackFrame.pu32[5] = pCtx->es.Sel; + uStackFrame.pu32[6] = pCtx->ds.Sel; + uStackFrame.pu32[7] = pCtx->fs.Sel; + uStackFrame.pu32[8] = pCtx->gs.Sel; + } + } + else + { + if (fFlags & IEM_XCPT_FLAGS_ERR) + *uStackFrame.pu16++ = uErr; + uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip; + uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; + uStackFrame.pu16[2] = fEfl; + uStackFrame.pu16[3] = pCtx->sp; + uStackFrame.pu16[4] = pCtx->ss.Sel; + if (fEfl & X86_EFL_VM) + { + uStackFrame.pu16[1] = pCtx->cs.Sel; + uStackFrame.pu16[5] = pCtx->es.Sel; + uStackFrame.pu16[6] = pCtx->ds.Sel; + uStackFrame.pu16[7] = pCtx->fs.Sel; + uStackFrame.pu16[8] = pCtx->gs.Sel; + } + } rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -2281,6 +2575,14 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */ pIemCpu->uCpl = uNewCpl; + + if (fEfl & X86_EFL_VM) + { + iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs); + iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs); + iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es); + iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds); + } } /* * Same privilege, no stack change and smaller stack frame. @@ -2289,18 +2591,28 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, { uint64_t uNewRsp; RTPTRUNION uStackFrame; - uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12; + uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate; rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp); if (rcStrict != VINF_SUCCESS) return rcStrict; void * const pvStackFrame = uStackFrame.pv; - if (fFlags & IEM_XCPT_FLAGS_ERR) - *uStackFrame.pu32++ = uErr; - uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT - ? pCtx->eip + cbInstr : pCtx->eip; - uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; - uStackFrame.pu32[2] = fEfl; + if (f32BitGate) + { + if (fFlags & IEM_XCPT_FLAGS_ERR) + *uStackFrame.pu32++ = uErr; + uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip; + uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; + uStackFrame.pu32[2] = fEfl; + } + else + { + if (fFlags & IEM_XCPT_FLAGS_ERR) + *uStackFrame.pu16++ = uErr; + uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip; + uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; + uStackFrame.pu16[2] = fEfl; + } rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */ if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -2343,34 +2655,6 @@ iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu, /** - * Implements exceptions and interrupts for V8086 mode. - * - * @returns VBox strict status code. - * @param pIemCpu The IEM per CPU instance data. - * @param pCtx The CPU context. 
- * @param cbInstr The number of bytes to offset rIP by in the return - * address. - * @param u8Vector The interrupt / exception vector number. - * @param fFlags The flags. - * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set. - * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set. - */ -static VBOXSTRICTRC -iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu, - PCPUMCTX pCtx, - uint8_t cbInstr, - uint8_t u8Vector, - uint32_t fFlags, - uint16_t uErr, - uint64_t uCr2) -{ - NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2); - /** @todo implement me. */ - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n")); -} - - -/** * Implements exceptions and interrupts for long mode. * * @returns VBox strict status code. @@ -2392,9 +2676,227 @@ iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu, uint16_t uErr, uint64_t uCr2) { - NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2); - /** @todo implement me. */ - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n")); + NOREF(cbInstr); + + /* + * Read the IDT entry. + */ + uint16_t offIdt = (uint16_t)u8Vector << 4; + if (pCtx->idtr.cbIdt < offIdt + 7) + { + Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt)); + return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); + } + X86DESC64 Idte; + VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt); + if (RT_LIKELY(rcStrict == VINF_SUCCESS)) + rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8); + if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) + return rcStrict; + Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n", + u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type, + Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow)); + + /* + * Check the descriptor type, DPL and such. + * ASSUMES this is done in the same order as described for call-gate calls. + */ + if (Idte.Gate.u1DescType) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); + return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); + } + uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM; + switch (Idte.Gate.u4Type) + { + case AMD64_SEL_TYPE_SYS_INT_GATE: + fEflToClear |= X86_EFL_IF; + break; + case AMD64_SEL_TYPE_SYS_TRAP_GATE: + break; + + default: + Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); + return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); + } + + /* Check DPL against CPL if applicable. */ + if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) + { + if (pIemCpu->uCpl > Idte.Gate.u2Dpl) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl)); + return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); + } + } + + /* Is it there? 
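 * (Illustrative aside, not part of the patch: in long mode every IDT entry is 16 bytes,
 * hence the vector << 4 offset and the two 64-bit reads above, and the target RIP is
 * assembled from three gate fields further down. A minimal sketch of that assembly in
 * plain C, helper name assumed:
 *
 *   #include <stdint.h>
 *   static uint64_t idte64TargetRip(uint16_t uOffLow, uint16_t uOffHigh, uint32_t uOffTop)
 *   {
 *       return (uint64_t)uOffLow            // bits 15:0
 *            | ((uint64_t)uOffHigh << 16)   // bits 31:16
 *            | ((uint64_t)uOffTop  << 32);  // bits 63:32
 *   }
 * )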
*/ + if (!Idte.Gate.u1Present) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector)); + return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); + } + + /* A null CS is bad. */ + RTSEL NewCS = Idte.Gate.u16Sel; + if (!(NewCS & X86_SEL_MASK_OFF_RPL)) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* Fetch the descriptor for the new CS. */ + IEMSELDESC DescCS; + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); + if (rcStrict != VINF_SUCCESS) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; + } + + /* Must be a 64-bit code segment. */ + if (!DescCS.Long.Gen.u1DescType) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type)); + return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL); + } + if ( !DescCS.Long.Gen.u1Long + || DescCS.Long.Gen.u1DefBig + || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) ) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n", + u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig)); + return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL); + } + + /* Don't allow lowering the privilege level. For non-conforming CS + selectors, the CS.DPL sets the privilege level the trap/interrupt + handler runs at. For conforming CS selectors, the CPL remains + unchanged, but the CS.DPL must be <= CPL. */ + /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched + * when CPU in Ring-0. Result \#GP? */ + if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n", + u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl)); + return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL); + } + + + /* Make sure the selector is present. */ + if (!DescCS.Legacy.Gen.u1Present) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS)); + return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS); + } + + /* Check that the new RIP is canonical. */ + uint64_t const uNewRip = Idte.Gate.u16OffsetLow + | ((uint32_t)Idte.Gate.u16OffsetHigh << 16) + | ((uint64_t)Idte.Gate.u32OffsetTop << 32); + if (!IEM_IS_CANONICAL(uNewRip)) + { + Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* + * If the privilege level changes or if the IST isn't zero, we need to get + * a new stack from the TSS. + */ + uint64_t uNewRsp; + uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF + ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl; + if ( uNewCpl != pIemCpu->uCpl + || Idte.Gate.u3IST != 0) + { + rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } + else + uNewRsp = pCtx->rsp; + uNewRsp &= ~(uint64_t)0xf; + + /* + * Calc the flag image to push. 
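 * (Aside, not part of the patch: RF in the pushed image is what keeps an instruction
 * breakpoint from re-triggering when the handler IRETs back to the same instruction,
 * so it is cleared for software interrupts and for #DB raised by instruction
 * breakpoints and set otherwise, mirroring the protected-mode path above.)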
+ */ + uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx); + if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) + fEfl &= ~X86_EFL_RF; + else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */ + + /* + * Start making changes. + */ + + /* Create the stack frame. */ + uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR)); + RTPTRUNION uStackFrame; + rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, + uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */ + if (rcStrict != VINF_SUCCESS) + return rcStrict; + void * const pvStackFrame = uStackFrame.pv; + + if (fFlags & IEM_XCPT_FLAGS_ERR) + *uStackFrame.pu64++ = uErr; + uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip; + uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */ + uStackFrame.pu64[2] = fEfl; + uStackFrame.pu64[3] = pCtx->rsp; + uStackFrame.pu64[4] = pCtx->ss.Sel; + rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + + /* Mark the CS selectors 'accessed' (hope this is the correct time). */ + /** @todo testcase: excatly _when_ are the accessed bits set - before or + * after pushing the stack frame? (Write protect the gdt + stack to + * find out.) */ + if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) + { + rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; + } + + /* + * Start comitting the register changes. + */ + /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the + * hidden registers when interrupting 32-bit or 16-bit code! */ + if (uNewCpl != pIemCpu->uCpl) + { + pCtx->ss.Sel = 0 | uNewCpl; + pCtx->ss.ValidSel = 0 | uNewCpl; + pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->ss.u32Limit = UINT32_MAX; + pCtx->ss.u64Base = 0; + pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE; + } + pCtx->rsp = uNewRsp - cbStackFrame; + pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl; + pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl; + pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy); + pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); + pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); + pCtx->rip = uNewRip; + pIemCpu->uCpl = uNewCpl; + + fEfl &= ~fEflToClear; + IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl); + + if (fFlags & IEM_XCPT_FLAGS_CR2) + pCtx->cr2 = uCr2; + + if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) + iemRaiseXcptAdjustState(pCtx, u8Vector); + + return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS; } @@ -2423,6 +2925,25 @@ iemRaiseXcptOrInt(PIEMCPU pIemCpu, PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); /* + * Perform the V8086 IOPL check and upgrade the fault without nesting. 
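 * (Illustrative aside, not part of the patch: with EFLAGS.VM set and IOPL below 3, a
 * software INT n is not delivered through the IDT but takes #GP(0) instead, which is
 * what the check below implements. A minimal sketch of the predicate in plain C,
 * helper and parameter names assumed:
 *
 *   #include <stdint.h>
 *   static int v86SoftIntUpgradesToGp(uint32_t fEfl, int fSoftInt)
 *   {
 *       int fVm  = (fEfl >> 17) & 1;   // EFLAGS.VM
 *       int iopl = (fEfl >> 12) & 3;   // EFLAGS.IOPL
 *       return fVm && iopl != 3 && fSoftInt;
 *   }
 * )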
+ */ + if ( pCtx->eflags.Bits.u1VM + && pCtx->eflags.Bits.u2IOPL != 3 + && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT + && (pCtx->cr0 & X86_CR0_PE) ) + { + Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector)); + fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR; + u8Vector = X86_XCPT_GP; + uErr = 0; + } +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx", + pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2, + pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp); +#endif + + /* * Do recursion accounting. */ uint8_t const uPrevXcpt = pIemCpu->uCurXcpt; @@ -2437,7 +2958,12 @@ iemRaiseXcptOrInt(PIEMCPU pIemCpu, /** @todo double and tripple faults. */ if (pIemCpu->cXcptRecursions >= 3) + { +#ifdef DEBUG_bird + AssertFailed(); +#endif IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n")); + } /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate. if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT) @@ -2458,7 +2984,7 @@ iemRaiseXcptOrInt(PIEMCPU pIemCpu, PVM pVM = IEMCPU_TO_VM(pIemCpu); PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); char szRegs[4096]; - DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), + DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n" "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n" "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n" @@ -2486,7 +3012,7 @@ iemRaiseXcptOrInt(PIEMCPU pIemCpu, ); char szInstr[256]; - DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0, + DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0, DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE, szInstr, sizeof(szInstr), NULL); Log3(("%s%s\n", szRegs, szInstr)); @@ -2501,10 +3027,8 @@ iemRaiseXcptOrInt(PIEMCPU pIemCpu, rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); else if (pCtx->msrEFER & MSR_K6_EFER_LMA) rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); - else if (!pCtx->eflags.Bits.u1VM) - rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); else - rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); + rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); /* * Unwind. @@ -2525,10 +3049,12 @@ DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu) } -/** \#DB - 01. */ +/** \#DB - 01. + * @note This automatically clear DR7.GD. */ DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu) { /** @todo set/clear RF. */ + pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD; return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); } @@ -2564,6 +3090,22 @@ DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU p } +/** \#TS(0) - 0a. */ +DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu) +{ + return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, + 0, 0); +} + + +/** \#TS(err) - 0a. 
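 * (Aside, not part of the patch: the low bits of a selector error code carry the EXT
 * and IDT flags rather than a privilege level, which is why the RPL field is masked
 * off below before the selector is used as the error code.)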
*/ +DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel) +{ + return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, + uSel & X86_SEL_MASK_OFF_RPL, 0); +} + + /** \#NP(err) - 0b. */ DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr) { @@ -2679,11 +3221,19 @@ DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) ) uErr |= X86_TRAP_PF_ID; +#if 0 /* This is so much non-sense, really. Why was it done like that? */ /* Note! RW access callers reporting a WRITE protection fault, will clear the READ flag before calling. So, read-modify-write accesses (RW) can safely be reported as READ faults. */ if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE) uErr |= X86_TRAP_PF_RW; +#else + if (fAccess & IEM_ACCESS_TYPE_WRITE) + { + if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ)) + uErr |= X86_TRAP_PF_RW; + } +#endif return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, uErr, GCPtrWhere); @@ -2835,7 +3385,7 @@ static void iemOpStubMsg2(PIEMCPU pIemCpu) PVM pVM = IEMCPU_TO_VM(pIemCpu); PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); char szRegs[4096]; - DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), + DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n" "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n" "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n" @@ -2863,7 +3413,7 @@ static void iemOpStubMsg2(PIEMCPU pIemCpu) ); char szInstr[256]; - DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0, + DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0, DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE, szInstr, sizeof(szInstr), NULL); @@ -3188,6 +3738,7 @@ static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr IEM_NOT_REACHED_DEFAULT_CASE_RET(); } + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -3211,8 +3762,9 @@ static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextIns if ( uNewIp > pCtx->cs.u32Limit && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ return iemRaiseGeneralProtectionFault0(pIemCpu); - /** @todo Test 16-bit jump in 64-bit mode. */ + /** @todo Test 16-bit jump in 64-bit mode. possible? */ pCtx->rip = uNewIp; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -3251,6 +3803,7 @@ static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextIns return iemRaiseGeneralProtectionFault0(pIemCpu); pCtx->rip = uNewRip; } + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -3305,6 +3858,7 @@ static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip) IEM_NOT_REACHED_DEFAULT_CASE_RET(); } + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -3312,12 +3866,13 @@ static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip) /** * Get the address of the top of the stack. * + * @param pIemCpu The per CPU data. * @param pCtx The CPU context which SP/ESP/RSP should be * read. 
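 * (Aside, not part of the patch: in 64-bit mode the full RSP is always in effect no
 * matter what the SS descriptor attributes say, and a null SS can be in force there,
 * as the long-mode interrupt path above shows, so the stack width is keyed off the
 * CPU mode; outside 64-bit mode the SS.D/B bit picks ESP versus SP.)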
*/ -DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx) +DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx) { - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) return pCtx->rsp; if (pCtx->ss.Attr.n.u1DefBig) return pCtx->esp; @@ -3328,10 +3883,12 @@ DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx) /** * Updates the RIP/EIP/IP to point to the next instruction. * + * This function leaves the EFLAGS.RF flag alone. + * * @param pIemCpu The per CPU data. * @param cbInstr The number of bytes to add. */ -static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr) +static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); switch (pIemCpu->enmCpuMode) @@ -3355,27 +3912,75 @@ static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr) } +#if 0 /** * Updates the RIP/EIP/IP to point to the next instruction. * * @param pIemCpu The per CPU data. */ -static void iemRegUpdateRip(PIEMCPU pIemCpu) +static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu) +{ + return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode); +} +#endif + + + +/** + * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF. + * + * @param pIemCpu The per CPU data. + * @param cbInstr The number of bytes to add. + */ +static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr) +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + + pCtx->eflags.Bits.u1RF = 0; + + switch (pIemCpu->enmCpuMode) + { + case IEMMODE_16BIT: + Assert(pCtx->rip <= UINT16_MAX); + pCtx->eip += cbInstr; + pCtx->eip &= UINT32_C(0xffff); + break; + + case IEMMODE_32BIT: + pCtx->eip += cbInstr; + Assert(pCtx->rip <= UINT32_MAX); + break; + + case IEMMODE_64BIT: + pCtx->rip += cbInstr; + break; + default: AssertFailed(); + } +} + + +/** + * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF. + * + * @param pIemCpu The per CPU data. + */ +static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu) { - return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode); + return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode); } /** * Adds to the stack pointer. * + * @param pIemCpu The per CPU data. * @param pCtx The CPU context which SP/ESP/RSP should be * updated. * @param cbToAdd The number of bytes to add. */ -DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd) +DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd) { - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) pCtx->rsp += cbToAdd; else if (pCtx->ss.Attr.n.u1DefBig) pCtx->esp += cbToAdd; @@ -3387,13 +3992,14 @@ DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd) /** * Subtracts from the stack pointer. * + * @param pIemCpu The per CPU data. * @param pCtx The CPU context which SP/ESP/RSP should be * updated. * @param cbToSub The number of bytes to subtract. */ -DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub) +DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub) { - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) pCtx->rsp -= cbToSub; else if (pCtx->ss.Attr.n.u1DefBig) pCtx->esp -= cbToSub; @@ -3405,13 +4011,14 @@ DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub) /** * Adds to the temporary stack pointer. * + * @param pIemCpu The per CPU data. * @param pTmpRsp The temporary SP/ESP/RSP to update. * @param cbToAdd The number of bytes to add. * @param pCtx Where to get the current stack mode. 
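 * (Illustrative aside, not part of the patch: the stack-pointer helpers all follow the
 * same width selection - full RSP in 64-bit mode, ESP when SS.D/B is set, otherwise a
 * 16-bit SP that wraps on its own. A minimal sketch in plain C; the union name and its
 * little-endian field layout are assumptions:
 *
 *   #include <stdint.h>
 *   typedef union { uint64_t u; uint32_t dw[2]; uint16_t w[4]; } RSPU;
 *   static void rspAdd(RSPU *pRsp, int f64Bit, int fBigStack, uint16_t cbToAdd)
 *   {
 *       if (f64Bit)         pRsp->u     += cbToAdd;  // 64-bit mode: the whole RSP
 *       else if (fBigStack) pRsp->dw[0] += cbToAdd;  // 32-bit stack: only ESP changes
 *       else                pRsp->w[0]  += cbToAdd;  // 16-bit stack: only SP wraps
 *   }
 * )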
*/ -DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx) +DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd) { - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) pTmpRsp->u += cbToAdd; else if (pCtx->ss.Attr.n.u1DefBig) pTmpRsp->DWords.dw0 += cbToAdd; @@ -3423,15 +4030,16 @@ DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCT /** * Subtracts from the temporary stack pointer. * + * @param pIemCpu The per CPU data. * @param pTmpRsp The temporary SP/ESP/RSP to update. * @param cbToSub The number of bytes to subtract. * @param pCtx Where to get the current stack mode. * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is * expecting that. */ -DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx) +DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub) { - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) pTmpRsp->u -= cbToSub; else if (pCtx->ss.Attr.n.u1DefBig) pTmpRsp->DWords.dw0 -= cbToSub; @@ -3445,17 +4053,18 @@ DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUM * well as the new RSP value (upper bits may be masked). * * @returns Effective stack addressf for the push. + * @param pIemCpu The IEM per CPU data. * @param pCtx Where to get the current stack mode. * @param cbItem The size of the stack item to pop. * @param puNewRsp Where to return the new RSP value. */ -DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) +DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) { RTUINT64U uTmpRsp; RTGCPTR GCPtrTop; uTmpRsp.u = pCtx->rsp; - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) GCPtrTop = uTmpRsp.u -= cbItem; else if (pCtx->ss.Attr.n.u1DefBig) GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem; @@ -3471,17 +4080,18 @@ DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t * specified size. * * @returns Current stack pointer. + * @param pIemCpu The per CPU data. * @param pCtx Where to get the current stack mode. * @param cbItem The size of the stack item to pop. * @param puNewRsp Where to return the new RSP value. */ -DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) +DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) { RTUINT64U uTmpRsp; RTGCPTR GCPtrTop; uTmpRsp.u = pCtx->rsp; - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { GCPtrTop = uTmpRsp.u; uTmpRsp.u += cbItem; @@ -3506,15 +4116,16 @@ DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t * well as the new temporary RSP value (upper bits may be masked). * * @returns Effective stack addressf for the push. + * @param pIemCpu The per CPU data. * @param pTmpRsp The temporary stack pointer. This is updated. * @param cbItem The size of the stack item to pop. * @param puNewRsp Where to return the new RSP value. 
*/ -DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx) +DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem) { RTGCPTR GCPtrTop; - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) GCPtrTop = pTmpRsp->u -= cbItem; else if (pCtx->ss.Attr.n.u1DefBig) GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem; @@ -3529,14 +4140,15 @@ DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PC * calculates and updates the temporary RSP. * * @returns Current stack pointer. + * @param pIemCpu The per CPU data. * @param pTmpRsp The temporary stack pointer. This is updated. * @param pCtx Where to get the current stack mode. * @param cbItem The size of the stack item to pop. */ -DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx) +DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem) { RTGCPTR GCPtrTop; - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { GCPtrTop = pTmpRsp->u; pTmpRsp->u += cbItem; @@ -3622,6 +4234,19 @@ DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu) /** + * Hook for preparing to use the host FPU for SSE + * + * This is necessary in ring-0 and raw-mode context. + * + * @param pIemCpu The IEM per CPU data. + */ +DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu) +{ + iemFpuPrepareUsage(pIemCpu); +} + + +/** * Stores a QNaN value into a FPU register. * * @param pReg Pointer to the register. @@ -4513,19 +5138,25 @@ DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAc * @param pIemCpu The IEM per CPU data. * @param pHid Pointer to the hidden register. * @param iSegReg The register number. + * @param pu64BaseAddr Where to return the base address to use for the + * segment. (In 64-bit code it may differ from the + * base in the hidden segment.) */ -static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg) +static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) { - if (!pHid->Attr.n.u1Present) - return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg); - - if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE) - || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) ) - && pIemCpu->enmCpuMode != IEMMODE_64BIT ) - return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W); - - /** @todo DPL/RPL/CPL? */ + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) + *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base; + else + { + if (!pHid->Attr.n.u1Present) + return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg); + if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE) + || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) ) + && pIemCpu->enmCpuMode != IEMMODE_64BIT ) + return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W); + *pu64BaseAddr = pHid->u64Base; + } return VINF_SUCCESS; } @@ -4539,18 +5170,23 @@ static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID * @param pIemCpu The IEM per CPU data. * @param pHid Pointer to the hidden register. * @param iSegReg The register number. + * @param pu64BaseAddr Where to return the base address to use for the + * segment. (In 64-bit code it may differ from the + * base in the hidden segment.) 
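 * (In 64-bit mode the CPU treats the CS, DS, ES and SS bases as zero and only FS and
 * GS keep a programmed base, which is why the helper returns 0 for segment indexes
 * below FS there.)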
*/ -static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg) +static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) { - if (!pHid->Attr.n.u1Present) - return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg); - - if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE - && pIemCpu->enmCpuMode != IEMMODE_64BIT ) - return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R); - - /** @todo DPL/RPL/CPL? */ + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) + *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base; + else + { + if (!pHid->Attr.n.u1Present) + return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg); + if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) + return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R); + *pu64BaseAddr = pHid->u64Base; + } return VINF_SUCCESS; } @@ -4606,14 +5242,17 @@ static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_ if ( GCPtrFirst32 > pSel->u32Limit || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */ return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess); - - *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base; } else { - /** @todo implement expand down segments. */ - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); + /* + * The upper boundary is defined by the B bit, not the G bit! + */ + if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1) + || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))) + return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess); } + *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base; } else { @@ -4715,6 +5354,18 @@ static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR G } } + /* + * Set the dirty / access flags. + * ASSUMES this is set when the address is translated rather than on committ... + */ + /** @todo testcase: check when A and D bits are actually set by the CPU. */ + uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A; + if ((fFlags & fAccessedDirty) != fAccessedDirty) + { + int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty); + AssertRC(rc2); + } + GCPhys |= GCPtrMem & PAGE_OFFSET_MASK; *pGCPhysMem = GCPhys; return VINF_SUCCESS; @@ -4737,7 +5388,16 @@ static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, #ifdef IEM_VERIFICATION_MODE_FULL /* Force the alternative path so we can ignore writes. */ if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem) + { + if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) + { + int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem, + RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers); + if (RT_FAILURE(rc2)) + pIemCpu->fProblematicMemory = true; + } return VERR_PGM_PHYS_TLB_CATCH_ALL; + } #endif #ifdef IEM_LOG_MEMORY_WRITES if (fAccess & IEM_ACCESS_TYPE_WRITE) @@ -4751,7 +5411,7 @@ static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, * regarding locking and unlocking needs to be struct. A couple of TLBs * living in PGM, but with publicly accessible inlined access methods * could perhaps be an even better solution. 
*/ - int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), + int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), GCPhysMem, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers, @@ -4759,6 +5419,11 @@ static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, pLock); /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/ AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc)); + +#ifdef IEM_VERIFICATION_MODE_FULL + if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) + pIemCpu->fProblematicMemory = true; +#endif return rc; } @@ -4976,6 +5641,24 @@ static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, return rcStrict; GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK; +#ifdef IEM_VERIFICATION_MODE_FULL + /* + * Detect problematic memory when verifying so we can select + * the right execution engine. (TLB: Redo this.) + */ + if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) + { + int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, + RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers); + if (RT_SUCCESS(rc2)) + rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, + RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers); + if (RT_FAILURE(rc2)) + pIemCpu->fProblematicMemory = true; + } +#endif + + /* * Read in the current memory content if it's a read, execute or partial * write access. @@ -5069,6 +5752,7 @@ static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false; pIemCpu->aMemMappings[iMemMap].pv = pbBuf; pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED; + pIemCpu->iNextMapping = iMemMap + 1; pIemCpu->cActiveMappings++; iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem); @@ -5158,6 +5842,7 @@ static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED; pIemCpu->aMemMappings[iMemMap].pv = pbBuf; pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED; + pIemCpu->iNextMapping = iMemMap + 1; pIemCpu->cActiveMappings++; iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem); @@ -5201,11 +5886,12 @@ static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint /* * Check the input and figure out which mapping entry to use. */ - Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94); + Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */ Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); unsigned iMemMap = pIemCpu->iNextMapping; - if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)) + if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings) + || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID) { iMemMap = iemMemMapFindFree(pIemCpu); AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3); @@ -5278,6 +5964,34 @@ static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t /** + * Rollbacks mappings, releasing page locks and such. + * + * The caller shall only call this after checking cActiveMappings. + * + * @returns Strict VBox status code to pass up. + * @param pIemCpu The IEM per CPU data. 
+ */ +static void iemMemRollback(PIEMCPU pIemCpu) +{ + Assert(pIemCpu->cActiveMappings > 0); + + uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings); + while (iMemMap-- > 0) + { + uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess; + if (fAccess != IEM_ACCESS_INVALID) + { + pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; + if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED)) + PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock); + Assert(pIemCpu->cActiveMappings > 0); + pIemCpu->cActiveMappings--; + } + } +} + + +/** * Fetches a data byte. * * @returns Strict VBox status code. @@ -5404,6 +6118,34 @@ static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8 /** + * Fetches a data qword, aligned at a 16 byte boundrary (for SSE). + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param pu64Dst Where to return the qword. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + */ +static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) +{ + /* The lazy approach for now... */ + /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */ + if (RT_UNLIKELY(GCPtrMem & 15)) + return iemRaiseGeneralProtectionFault0(pIemCpu); + + uint64_t const *pu64Src; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); + if (rc == VINF_SUCCESS) + { + *pu64Dst = *pu64Src; + rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); + } + return rc; +} + + +/** * Fetches a data tword. * * @returns Strict VBox status code. @@ -5428,6 +6170,63 @@ static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uin /** + * Fetches a data dqword (double qword), generally SSE related. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param pu128Dst Where to return the qword. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + */ +static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) +{ + /* The lazy approach for now... */ + uint128_t const *pu128Src; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); + if (rc == VINF_SUCCESS) + { + *pu128Dst = *pu128Src; + rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); + } + return rc; +} + + +/** + * Fetches a data dqword (double qword) at an aligned address, generally SSE + * related. + * + * Raises GP(0) if not aligned. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param pu128Dst Where to return the qword. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + */ +static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) +{ + /* The lazy approach for now... */ + /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. 
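 * (Aside, not part of the patch: most 128-bit SSE memory operands must be 16-byte
 * aligned and take #GP(0) when they are not; the MXCSR.MM test in the check below
 * covers AMD's misaligned-SSE mode, which relaxes that requirement when set.)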
*/ + if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ + return iemRaiseGeneralProtectionFault0(pIemCpu); + + uint128_t const *pu128Src; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); + if (rc == VINF_SUCCESS) + { + *pu128Dst = *pu128Src; + rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); + } + return rc; +} + + + + +/** * Fetches a descriptor register (lgdt, lidt). * * @returns Strict VBox status code. @@ -5575,6 +6374,57 @@ static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR /** + * Stores a data dqword. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + * @param u64Value The value to store. + */ +static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) +{ + /* The lazy approach for now... */ + uint128_t *pu128Dst; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); + if (rc == VINF_SUCCESS) + { + *pu128Dst = u128Value; + rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W); + } + return rc; +} + + +/** + * Stores a data dqword, SSE aligned. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + * @param u64Value The value to store. + */ +static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) +{ + /* The lazy approach for now... */ + if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ + return iemRaiseGeneralProtectionFault0(pIemCpu); + + uint128_t *pu128Dst; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); + if (rc == VINF_SUCCESS) + { + *pu128Dst = u128Value; + rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W); + } + return rc; +} + + +/** * Stores a descriptor register (sgdt, sidt). * * @returns Strict VBox status code. @@ -5638,7 +6488,7 @@ static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp); /* Write the word the lazy way. */ uint16_t *pu16Dst; @@ -5669,9 +6519,9 @@ static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp); - /* Write the word the lazy way. */ + /* Write the dword the lazy way. 
*/ uint32_t *pu32Dst; VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); if (rc == VINF_SUCCESS) @@ -5689,6 +6539,58 @@ static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value) /** + * Pushes a dword segment register value onto the stack. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param u16Value The value to push. + */ +static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value) +{ + /* Increment the stack pointer. */ + uint64_t uNewRsp; + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp); + + VBOXSTRICTRC rc; + if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + { + /* The recompiler writes a full dword. */ + uint32_t *pu32Dst; + rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); + if (rc == VINF_SUCCESS) + { + *pu32Dst = u32Value; + rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W); + } + } + else + { + /* The intel docs talks about zero extending the selector register + value. My actual intel CPU here might be zero extending the value + but it still only writes the lower word... */ + /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what + * happens when crossing an electric page boundrary, is the high word + * checked for write accessibility or not? Probably it is. What about + * segment limits? */ + uint16_t *pu16Dst; + rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW); + if (rc == VINF_SUCCESS) + { + *pu16Dst = (uint16_t)u32Value; + rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW); + } + } + + /* Commit the new RSP value unless we an access handler made trouble. */ + if (rc == VINF_SUCCESS) + pCtx->rsp = uNewRsp; + + return rc; +} + + +/** * Pushes a qword onto the stack. * * @returns Strict VBox status code. @@ -5700,7 +6602,7 @@ static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp); /* Write the word the lazy way. */ uint64_t *pu64Dst; @@ -5731,7 +6633,7 @@ static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp); /* Write the word the lazy way. */ uint16_t const *pu16Src; @@ -5762,7 +6664,7 @@ static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp); /* Write the word the lazy way. */ uint32_t const *pu32Src; @@ -5793,7 +6695,7 @@ static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value) /* Increment the stack pointer. */ uint64_t uNewRsp; PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp); /* Write the word the lazy way. 
*/ uint64_t const *pu64Src; @@ -5825,7 +6727,7 @@ static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRT /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2); /* Write the word the lazy way. */ uint16_t *pu16Dst; @@ -5857,7 +6759,7 @@ static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRT /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4); /* Write the word the lazy way. */ uint32_t *pu32Dst; @@ -5889,7 +6791,7 @@ static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRT /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8); /* Write the word the lazy way. */ uint64_t *pu64Dst; @@ -5921,7 +6823,7 @@ static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PR /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2); /* Write the word the lazy way. */ uint16_t const *pu16Src; @@ -5953,7 +6855,7 @@ static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PR /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4); /* Write the word the lazy way. */ uint32_t const *pu32Src; @@ -5985,7 +6887,7 @@ static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PR /* Increment the stack pointer. */ PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp = *pTmpRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8); /* Write the word the lazy way. 
*/ uint64_t const *pu64Src; @@ -6025,7 +6927,7 @@ static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, v { Assert(cbMem < UINT8_MAX); PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp); return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); } @@ -6069,7 +6971,7 @@ static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, vo { Assert(cbMem < UINT8_MAX); PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp); + RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp); return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); } @@ -6094,7 +6996,7 @@ static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); RTUINT64U NewRsp; NewRsp.u = *puNewRsp; - RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx); + RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8); *puNewRsp = NewRsp.u; return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); } @@ -6140,6 +7042,54 @@ static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem /** + * Fetches a system table byte. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param pbDst Where to return the byte. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + */ +static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) +{ + /* The lazy approach for now... */ + uint8_t const *pbSrc; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); + if (rc == VINF_SUCCESS) + { + *pbDst = *pbSrc; + rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R); + } + return rc; +} + + +/** + * Fetches a system table word. + * + * @returns Strict VBox status code. + * @param pIemCpu The IEM per CPU data. + * @param pu16Dst Where to return the word. + * @param iSegReg The index of the segment register to use for + * this access. The base and limits are checked. + * @param GCPtrMem The address of the guest memory. + */ +static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) +{ + /* The lazy approach for now... */ + uint16_t const *pu16Src; + VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); + if (rc == VINF_SUCCESS) + { + *pu16Dst = *pu16Src; + rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R); + } + return rc; +} + + +/** * Fetches a system table dword. * * @returns Strict VBox status code. @@ -6194,8 +7144,9 @@ static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_ * @param pIemCpu The IEM per CPU. * @param pDesc Where to return the descriptor table entry. * @param uSel The selector which table entry to fetch. + * @param uXcpt The exception to raise on table lookup error. 
*/ -static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel) +static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); @@ -6211,8 +7162,8 @@ static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint1 { Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n", uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel)); - /** @todo is this the right exception? */ - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); + return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, + uSel & ~X86_SEL_RPL, 0); } Assert(pCtx->ldtr.Attr.n.u1Present); @@ -6223,8 +7174,8 @@ static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint1 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt) { Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt)); - /** @todo is this the right exception? */ - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); + return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, + uSel & ~X86_SEL_RPL, 0); } GCPtrBase = pCtx->gdtr.pGdt; } @@ -6245,7 +7196,8 @@ static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint1 { Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel)); /** @todo is this the right exception? */ - return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); + return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, + uSel & ~X86_SEL_RPL, 0); } } return rcStrict; @@ -6357,7 +7309,7 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) return rcStrict2; \ } while (0) -#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu) +#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu) #define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8)) #define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16)) #define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32)) @@ -6376,6 +7328,32 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \ return iemRaiseMathFault(pIemCpu); \ } while (0) +#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \ + do { \ + if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ + || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \ + || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \ + return iemRaiseUndefinedOpcode(pIemCpu); \ + if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ + return iemRaiseDeviceNotAvailable(pIemCpu); \ + } while (0) +#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \ + do { \ + if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ + || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \ + return iemRaiseUndefinedOpcode(pIemCpu); \ + if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ + return iemRaiseDeviceNotAvailable(pIemCpu); \ + } while (0) +#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \ + do { \ + if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ + || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \ + && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \ + return iemRaiseUndefinedOpcode(pIemCpu); \ + if 
(pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ + return iemRaiseDeviceNotAvailable(pIemCpu); \ + } while (0) #define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \ do { \ if (pIemCpu->uCpl != 0) \ @@ -6538,6 +7516,44 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) #define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0) +#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \ + do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0) +#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \ + do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0) +#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \ + do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0) +#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \ + do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0) +#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \ + (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx) +#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \ + (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx) +#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \ + (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx) + +#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \ + do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0) +#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \ + do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0) +#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \ + do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0) +#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \ + do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0) +#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \ + do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \ + pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \ + } while (0) +#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \ + do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \ + pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \ + } while (0) +#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \ + (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm) +#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \ + (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm) +#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \ + (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]) + #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem))) #define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \ @@ -6566,6 +7582,8 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) #define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) +#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ + IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, 
&(a_u128Dst), (a_iSeg), (a_GCPtrMem))) #define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem))) @@ -6574,6 +7592,12 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) #define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \ IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))) +#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ + IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) +#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ + IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) + + #define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ do { \ @@ -6679,11 +7703,18 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \ } while (0) +#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ + IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))) +#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ + IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))) + #define IEM_MC_PUSH_U16(a_u16Value) \ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value))) #define IEM_MC_PUSH_U32(a_u32Value) \ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value))) +#define IEM_MC_PUSH_U32_SREG(a_u32Value) \ + IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value))) #define IEM_MC_PUSH_U64(a_u64Value) \ IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value))) @@ -6732,13 +7763,15 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) } while (0) /** Calculate efficient address from R/M. */ -#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \ - IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff))) +#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \ + IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff))) +#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)() #define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0)) #define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1)) #define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2)) #define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3)) +#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2)) #define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3)) /** @@ -6771,7 +7804,7 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) /** * Defers the rest of the instruction emulation to a C implementation routine - * and returns, taking two arguments in addition to the standard ones. + * and returns, taking three arguments in addition to the standard ones. * * @param a_pfnCImpl The pointer to the C routine. * @param a0 The first extra argument. @@ -6782,6 +7815,18 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) /** * Defers the rest of the instruction emulation to a C implementation routine + * and returns, taking four arguments in addition to the standard ones. + * + * @param a_pfnCImpl The pointer to the C routine. + * @param a0 The first extra argument. 
+ * @param a1 The second extra argument. + * @param a2 The third extra argument. + * @param a3 The fourth extra argument. + */ +#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3) + +/** + * Defers the rest of the instruction emulation to a C implementation routine * and returns, taking two arguments in addition to the standard ones. * * @param a_pfnCImpl The pointer to the C routine. @@ -6983,6 +8028,62 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) #define IEM_MC_USED_FPU() \ CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM) +/** + * Calls an MMX assembly implementation taking two visible arguments. + * + * @param a_pfnAImpl Pointer to the assembly MMX routine. + * @param a0 The first extra argument. + * @param a1 The second extra argument. + */ +#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \ + do { \ + iemFpuPrepareUsage(pIemCpu); \ + a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \ + } while (0) + +/** + * Calls an MMX assembly implementation taking three visible arguments. + * + * @param a_pfnAImpl Pointer to the assembly MMX routine. + * @param a0 The first extra argument. + * @param a1 The second extra argument. + * @param a2 The third extra argument. + */ +#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ + do { \ + iemFpuPrepareUsage(pIemCpu); \ + a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \ + } while (0) + + +/** + * Calls an SSE assembly implementation taking two visible arguments. + * + * @param a_pfnAImpl Pointer to the assembly SSE routine. + * @param a0 The first extra argument. + * @param a1 The second extra argument. + */ +#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \ + do { \ + iemFpuPrepareUsageSse(pIemCpu); \ + a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \ + } while (0) + +/** + * Calls an SSE assembly implementation taking three visible arguments. + * + * @param a_pfnAImpl Pointer to the assembly SSE routine. + * @param a0 The first extra argument. + * @param a1 The second extra argument. + * @param a2 The third extra argument. + */ +#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ + do { \ + iemFpuPrepareUsageSse(pIemCpu); \ + a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \ + } while (0) + + /** @note Not for IOPL or IF testing. */ #define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) { /** @note Not for IOPL or IF testing. */ @@ -7106,6 +8207,15 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) return IEMOP_RAISE_INVALID_OPCODE(); \ } while (0) +/** The instruction is only available in 64-bit mode, throw #UD if we're not in + * 64-bit mode. */ +#define IEMOP_HLP_ONLY_64BIT() \ + do \ + { \ + if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \ + return IEMOP_RAISE_INVALID_OPCODE(); \ + } while (0) + /** The instruction defaults to 64-bit operand size if 64-bit mode. */ #define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \ do \ @@ -7122,6 +8232,23 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \ } while (0) +/** Only a REX prefix immediately preceding the first opcode byte takes + * effect. This macro helps ensure this and logs bad guest code. 
*/ +#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \ + do \ + { \ + if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \ + { \ + Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \ + pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \ + pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \ + pIemCpu->uRexB = 0; \ + pIemCpu->uRexIndex = 0; \ + pIemCpu->uRexReg = 0; \ + iemRecalEffOpSize(pIemCpu); \ + } \ + } while (0) + /** * Done decoding. */ @@ -7140,6 +8267,24 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \ return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \ } while (0) +#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \ + do \ + { \ + if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \ + { \ + NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \ + return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \ + } \ + } while (0) +#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \ + do \ + { \ + if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \ + { \ + NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \ + return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \ + } \ + } while (0) /** @@ -7150,9 +8295,12 @@ static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel) * @return Strict VBox status code. * @param pIemCpu The IEM per CPU data. * @param bRm The ModRM byte. + * @param cbImm The size of any immediate following the + * effective address opcode bytes. Important for + * RIP relative addressing. * @param pGCPtrEff Where to return the effective address. */ -static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff) +static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) { Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm)); PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); @@ -7163,10 +8311,10 @@ static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pIemCpu->iEffSeg = X86_SREG_SS; \ } while (0) -/** @todo Check the effective address size crap! */ - switch (pIemCpu->enmEffAddrMode) + if (pIemCpu->enmCpuMode != IEMMODE_64BIT) { - case IEMMODE_16BIT: +/** @todo Check the effective address size crap! */ + if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT) { uint16_t u16EffAddr; @@ -7199,12 +8347,10 @@ static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR } *pGCPtrEff = u16EffAddr; - Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff)); - return VINF_SUCCESS; } - - case IEMMODE_32BIT: + else { + Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT); uint32_t u32EffAddr; /* Handle the disp32 form with no registers first. */ @@ -7300,143 +8446,145 @@ static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT); *pGCPtrEff = u32EffAddr & UINT16_MAX; } - Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff)); - return VINF_SUCCESS; } + } + else + { + uint64_t u64EffAddr; - case IEMMODE_64BIT: + /* Handle the rip+disp32 form with no registers first. */ + if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) { - uint64_t u64EffAddr; - - /* Handle the rip+disp32 form with no registers first. 
*/ - if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) - { - IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); - u64EffAddr += pCtx->rip + pIemCpu->offOpcode; - } - else + IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); + u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm; + } + else + { + /* Get the register (or SIB) value. */ + switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB) { - /* Get the register (or SIB) value. */ - switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB) + case 0: u64EffAddr = pCtx->rax; break; + case 1: u64EffAddr = pCtx->rcx; break; + case 2: u64EffAddr = pCtx->rdx; break; + case 3: u64EffAddr = pCtx->rbx; break; + case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break; + case 6: u64EffAddr = pCtx->rsi; break; + case 7: u64EffAddr = pCtx->rdi; break; + case 8: u64EffAddr = pCtx->r8; break; + case 9: u64EffAddr = pCtx->r9; break; + case 10: u64EffAddr = pCtx->r10; break; + case 11: u64EffAddr = pCtx->r11; break; + case 13: u64EffAddr = pCtx->r13; break; + case 14: u64EffAddr = pCtx->r14; break; + case 15: u64EffAddr = pCtx->r15; break; + /* SIB */ + case 4: + case 12: { - case 0: u64EffAddr = pCtx->rax; break; - case 1: u64EffAddr = pCtx->rcx; break; - case 2: u64EffAddr = pCtx->rdx; break; - case 3: u64EffAddr = pCtx->rbx; break; - case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break; - case 6: u64EffAddr = pCtx->rsi; break; - case 7: u64EffAddr = pCtx->rdi; break; - case 8: u64EffAddr = pCtx->r8; break; - case 9: u64EffAddr = pCtx->r9; break; - case 10: u64EffAddr = pCtx->r10; break; - case 11: u64EffAddr = pCtx->r11; break; - case 13: u64EffAddr = pCtx->r13; break; - case 14: u64EffAddr = pCtx->r14; break; - case 15: u64EffAddr = pCtx->r15; break; - /* SIB */ - case 4: - case 12: - { - uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib); + uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib); - /* Get the index and scale it. */ - switch (((bSib & X86_SIB_INDEX_SHIFT) >> X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex) - { - case 0: u64EffAddr = pCtx->rax; break; - case 1: u64EffAddr = pCtx->rcx; break; - case 2: u64EffAddr = pCtx->rdx; break; - case 3: u64EffAddr = pCtx->rbx; break; - case 4: u64EffAddr = 0; /*none */ break; - case 5: u64EffAddr = pCtx->rbp; break; - case 6: u64EffAddr = pCtx->rsi; break; - case 7: u64EffAddr = pCtx->rdi; break; - case 8: u64EffAddr = pCtx->r8; break; - case 9: u64EffAddr = pCtx->r9; break; - case 10: u64EffAddr = pCtx->r10; break; - case 11: u64EffAddr = pCtx->r11; break; - case 12: u64EffAddr = pCtx->r12; break; - case 13: u64EffAddr = pCtx->r13; break; - case 14: u64EffAddr = pCtx->r14; break; - case 15: u64EffAddr = pCtx->r15; break; - IEM_NOT_REACHED_DEFAULT_CASE_RET(); - } - u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; + /* Get the index and scale it. 
*/ + switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex) + { + case 0: u64EffAddr = pCtx->rax; break; + case 1: u64EffAddr = pCtx->rcx; break; + case 2: u64EffAddr = pCtx->rdx; break; + case 3: u64EffAddr = pCtx->rbx; break; + case 4: u64EffAddr = 0; /*none */ break; + case 5: u64EffAddr = pCtx->rbp; break; + case 6: u64EffAddr = pCtx->rsi; break; + case 7: u64EffAddr = pCtx->rdi; break; + case 8: u64EffAddr = pCtx->r8; break; + case 9: u64EffAddr = pCtx->r9; break; + case 10: u64EffAddr = pCtx->r10; break; + case 11: u64EffAddr = pCtx->r11; break; + case 12: u64EffAddr = pCtx->r12; break; + case 13: u64EffAddr = pCtx->r13; break; + case 14: u64EffAddr = pCtx->r14; break; + case 15: u64EffAddr = pCtx->r15; break; + IEM_NOT_REACHED_DEFAULT_CASE_RET(); + } + u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; - /* add base */ - switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB) - { - case 0: u64EffAddr += pCtx->rax; break; - case 1: u64EffAddr += pCtx->rcx; break; - case 2: u64EffAddr += pCtx->rdx; break; - case 3: u64EffAddr += pCtx->rbx; break; - case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break; - case 6: u64EffAddr += pCtx->rsi; break; - case 7: u64EffAddr += pCtx->rdi; break; - case 8: u64EffAddr += pCtx->r8; break; - case 9: u64EffAddr += pCtx->r9; break; - case 10: u64EffAddr += pCtx->r10; break; - case 11: u64EffAddr += pCtx->r11; break; - case 14: u64EffAddr += pCtx->r14; break; - case 15: u64EffAddr += pCtx->r15; break; - /* complicated encodings */ - case 5: - case 13: - if ((bRm & X86_MODRM_MOD_MASK) != 0) + /* add base */ + switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB) + { + case 0: u64EffAddr += pCtx->rax; break; + case 1: u64EffAddr += pCtx->rcx; break; + case 2: u64EffAddr += pCtx->rdx; break; + case 3: u64EffAddr += pCtx->rbx; break; + case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break; + case 6: u64EffAddr += pCtx->rsi; break; + case 7: u64EffAddr += pCtx->rdi; break; + case 8: u64EffAddr += pCtx->r8; break; + case 9: u64EffAddr += pCtx->r9; break; + case 10: u64EffAddr += pCtx->r10; break; + case 11: u64EffAddr += pCtx->r11; break; + case 12: u64EffAddr += pCtx->r12; break; + case 14: u64EffAddr += pCtx->r14; break; + case 15: u64EffAddr += pCtx->r15; break; + /* complicated encodings */ + case 5: + case 13: + if ((bRm & X86_MODRM_MOD_MASK) != 0) + { + if (!pIemCpu->uRexB) { - if (!pIemCpu->uRexB) - { - u64EffAddr += pCtx->rbp; - SET_SS_DEF(); - } - else - u64EffAddr += pCtx->r13; + u64EffAddr += pCtx->rbp; + SET_SS_DEF(); } else - { - uint32_t u32Disp; - IEM_OPCODE_GET_NEXT_U32(&u32Disp); - u64EffAddr += (int32_t)u32Disp; - } - break; - } - break; + u64EffAddr += pCtx->r13; + } + else + { + uint32_t u32Disp; + IEM_OPCODE_GET_NEXT_U32(&u32Disp); + u64EffAddr += (int32_t)u32Disp; + } + break; + IEM_NOT_REACHED_DEFAULT_CASE_RET(); } - IEM_NOT_REACHED_DEFAULT_CASE_RET(); + break; } + IEM_NOT_REACHED_DEFAULT_CASE_RET(); + } - /* Get and add the displacement. */ - switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) + /* Get and add the displacement. 
*/ + switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) + { + case 0: + break; + case 1: { - case 0: - break; - case 1: - { - int8_t i8Disp; - IEM_OPCODE_GET_NEXT_S8(&i8Disp); - u64EffAddr += i8Disp; - break; - } - case 2: - { - uint32_t u32Disp; - IEM_OPCODE_GET_NEXT_U32(&u32Disp); - u64EffAddr += (int32_t)u32Disp; - break; - } - IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */ + int8_t i8Disp; + IEM_OPCODE_GET_NEXT_S8(&i8Disp); + u64EffAddr += i8Disp; + break; } - + case 2: + { + uint32_t u32Disp; + IEM_OPCODE_GET_NEXT_U32(&u32Disp); + u64EffAddr += (int32_t)u32Disp; + break; + } + IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */ } - if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT) - *pGCPtrEff = u64EffAddr; - else - *pGCPtrEff = u64EffAddr & UINT16_MAX; - Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff)); - return VINF_SUCCESS; + + } + + if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT) + *pGCPtrEff = u64EffAddr; + else + { + Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT); + *pGCPtrEff = u64EffAddr & UINT32_MAX; } } - AssertFailedReturn(VERR_INTERNAL_ERROR_3); + Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff)); + return VINF_SUCCESS; } /** @} */ @@ -7470,8 +8618,8 @@ static void iemExecVerificationModeSetup(PIEMCPU pIemCpu) /* * Enable verification and/or logging. */ - pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */ - if ( pIemCpu->fNoRem + bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */; + if ( fNewNoRem && ( 0 #if 0 /* auto enable on first paged protected mode interrupt */ || ( pOrgCtx->eflags.Bits.u1IF @@ -7533,12 +8681,35 @@ static void iemExecVerificationModeSetup(PIEMCPU pIemCpu) #if 0 /* NT4SP1 - frstor [ecx] */ || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f) #endif +#if 0 /* xxxxxx - All long mode code. */ + || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA) +#endif +#if 0 /* rep movsq linux 3.7 64-bit boot. */ + || (pOrgCtx->rip == 0x0000000000100241) +#endif +#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */ + || (pOrgCtx->rip == 0x000000000215e240) +#endif +#if 0 /* DOS's size-overridden iret to v8086. 
*/ + || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8) +#endif ) ) { RTLogGroupSettings(NULL, "iem.eo.l6.l2"); RTLogFlags(NULL, "enabled"); - pIemCpu->fNoRem = false; + fNewNoRem = false; + } + if (fNewNoRem != pIemCpu->fNoRem) + { + pIemCpu->fNoRem = fNewNoRem; + if (!fNewNoRem) + { + LogAlways(("Enabling verification mode!\n")); + CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL); + } + else + LogAlways(("Disabling verification mode!\n")); } /* @@ -7564,7 +8735,7 @@ static void iemExecVerificationModeSetup(PIEMCPU pIemCpu) TRPMEVENT enmType; RTGCUINT uErrCode; RTGCPTR uCr2; - int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2); + int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2); if (!IEM_VERIFICATION_ENABLED(pIemCpu)) TRPMResetTrap(pVCpu); @@ -7578,6 +8749,7 @@ static void iemExecVerificationModeSetup(PIEMCPU pIemCpu) pIemCpu->cIOWrites = 0; pIemCpu->fIgnoreRaxRdx = false; pIemCpu->fOverlappingMovs = false; + pIemCpu->fProblematicMemory = false; pIemCpu->fUndefinedEFlags = 0; if (IEM_VERIFICATION_ENABLED(pIemCpu)) @@ -7790,7 +8962,7 @@ static void iemVerifyAssertMsg2(PIEMCPU pIemCpu) PVM pVM = IEMCPU_TO_VM(pIemCpu); PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); char szRegs[4096]; - DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), + DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n" "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n" "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n" @@ -7818,11 +8990,11 @@ static void iemVerifyAssertMsg2(PIEMCPU pIemCpu) ); char szInstr1[256]; - DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip, + DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szInstr1, sizeof(szInstr1), NULL); char szInstr2[256]; - DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0, + DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0, DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE, szInstr2, sizeof(szInstr2), NULL); @@ -7911,8 +9083,10 @@ static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, con * * @param pIemCpu The IEM per CPU data. * @param pEvtRec The write record. + * @param fRem Set if REM was doing the other executing. If clear + * it was HM. */ -static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec) +static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem) { uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf); Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb); @@ -7927,19 +9101,19 @@ static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec) && pEvtRec->u.RamWrite.cb != 2 && pEvtRec->u.RamWrite.cb != 4) ) { - /* fend off ROMs */ - if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000) - && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000) + /* fend off ROMs and MMIO */ + if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000) && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) ) { /* fend off fxsave */ if (pEvtRec->u.RamWrite.cb != 512) { + const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? 
"vmx" : "svm"; RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__); RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys); - RTAssertMsg2Add("REM: %.*Rhxs\n" - "IEM: %.*Rhxs\n", - pEvtRec->u.RamWrite.cb, abBuf, + RTAssertMsg2Add("%s: %.*Rhxs\n" + "iem: %.*Rhxs\n", + pszWho, pEvtRec->u.RamWrite.cb, abBuf, pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab); iemVerifyAssertAddRecordDump(pEvtRec); iemVerifyAssertMsg2(pIemCpu); @@ -7970,11 +9144,44 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) /* * Execute the instruction in REM. */ - PVM pVM = IEMCPU_TO_VM(pIemCpu); - EMRemLock(pVM); - int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu)); - AssertRC(rc); - EMRemUnlock(pVM); + bool fRem = false; + PVM pVM = IEMCPU_TO_VM(pIemCpu); + PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); + VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST; +#ifdef IEM_VERIFICATION_MODE_FULL_HM + if ( HMIsEnabled(pVM) + && pIemCpu->cIOReads == 0 + && pIemCpu->cIOWrites == 0 + && !pIemCpu->fProblematicMemory) + { + unsigned iLoops = 0; + do + { + rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE); + iLoops++; + } while ( rc == VINF_SUCCESS + || ( rc == VINF_EM_DBG_STEPPED + && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) + && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip) + || ( pOrgCtx->rip != pDebugCtx->rip + && pIemCpu->uInjectCpl != UINT8_MAX + && iLoops < 8) ); + } +#endif + if ( rc == VERR_EM_CANNOT_EXEC_GUEST + || rc == VINF_IOM_R3_IOPORT_READ + || rc == VINF_IOM_R3_IOPORT_WRITE + || rc == VINF_IOM_R3_MMIO_READ + || rc == VINF_IOM_R3_MMIO_READ_WRITE + || rc == VINF_IOM_R3_MMIO_WRITE + ) + { + EMRemLock(pVM); + rc = REMR3EmulateInstruction(pVM, pVCpu); + AssertRC(rc); + EMRemUnlock(pVM); + fRem = true; + } /* * Compare the register states. @@ -7983,55 +9190,59 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx))) { //Log(("REM and IEM ends up with different registers!\n")); + const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? 
"vmx" : "svm"; # define CHECK_FIELD(a_Field) \ - do \ + do \ + { \ + if (pOrgCtx->a_Field != pDebugCtx->a_Field) \ { \ - if (pOrgCtx->a_Field != pDebugCtx->a_Field) \ + switch (sizeof(pOrgCtx->a_Field)) \ { \ - switch (sizeof(pOrgCtx->a_Field)) \ - { \ - case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \ - case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \ - case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \ - case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \ - default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \ - } \ - cDiffs++; \ + case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \ + case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \ + case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \ + case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \ + default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \ } \ - } while (0) + cDiffs++; \ + } \ + } while (0) # define CHECK_BIT_FIELD(a_Field) \ - do \ + do \ + { \ + if (pOrgCtx->a_Field != pDebugCtx->a_Field) \ { \ - if (pOrgCtx->a_Field != pDebugCtx->a_Field) \ - { \ - RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \ - cDiffs++; \ - } \ - } while (0) + RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \ + cDiffs++; \ + } \ + } while (0) # define CHECK_SEL(a_Sel) \ - do \ - { \ - CHECK_FIELD(a_Sel.Sel); \ - CHECK_FIELD(a_Sel.Attr.u); \ - CHECK_FIELD(a_Sel.u64Base); \ - CHECK_FIELD(a_Sel.u32Limit); \ - CHECK_FIELD(a_Sel.fFlags); \ - } while (0) + do \ + { \ + CHECK_FIELD(a_Sel.Sel); \ + CHECK_FIELD(a_Sel.Attr.u); \ + CHECK_FIELD(a_Sel.u64Base); \ + CHECK_FIELD(a_Sel.u32Limit); \ + CHECK_FIELD(a_Sel.fFlags); \ + } while (0) #if 1 /* The recompiler doesn't update these the intel way. */ - pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP; - pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP; - pOrgCtx->fpu.CS = pDebugCtx->fpu.CS; - pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1; - pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP; - pOrgCtx->fpu.DS = pDebugCtx->fpu.DS; - pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2; - pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. 
*/ - if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK)) - pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW; + if (fRem) + { + pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP; + pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP; + pOrgCtx->fpu.CS = pDebugCtx->fpu.CS; + pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1; + pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP; + pOrgCtx->fpu.DS = pDebugCtx->fpu.DS; + pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2; + //pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; + if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK)) + pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW; + } #endif if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu))) { @@ -8080,7 +9291,7 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags; if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask)) { - RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u); + RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u); CHECK_BIT_FIELD(rflags.Bits.u1CF); CHECK_BIT_FIELD(rflags.Bits.u1Reserved0); CHECK_BIT_FIELD(rflags.Bits.u1PF); @@ -8096,7 +9307,8 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) CHECK_BIT_FIELD(rflags.Bits.u2IOPL); CHECK_BIT_FIELD(rflags.Bits.u1NT); CHECK_BIT_FIELD(rflags.Bits.u1Reserved3); - CHECK_BIT_FIELD(rflags.Bits.u1RF); + if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */ + CHECK_BIT_FIELD(rflags.Bits.u1RF); CHECK_BIT_FIELD(rflags.Bits.u1VM); CHECK_BIT_FIELD(rflags.Bits.u1AC); CHECK_BIT_FIELD(rflags.Bits.u1VIF); @@ -8127,16 +9339,18 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) CHECK_SEL(fs); CHECK_SEL(gs); CHECK_FIELD(cr0); - /* Klugde #1: REM fetches code and accross the page boundrary and faults on the next page, while we execute + + /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */ /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access while REM reports the address of the first byte on the page. Pending investigation as to which is correct. 
*/ if (pOrgCtx->cr2 != pDebugCtx->cr2) { - if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3) + if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem) { /* ignore */ } else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3) - && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0) + && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0 + && fRem) { /* ignore */ } else CHECK_FIELD(cr2); @@ -8148,7 +9362,7 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) CHECK_FIELD(dr[2]); CHECK_FIELD(dr[3]); CHECK_FIELD(dr[6]); - if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/ + if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/ CHECK_FIELD(dr[7]); CHECK_FIELD(gdtr.cbGdt); CHECK_FIELD(gdtr.pGdt); @@ -8169,7 +9383,7 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) if (cDiffs != 0) { - DBGFR3Info(pVM, "cpumguest", "verbose", NULL); + DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL); RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); iemVerifyAssertMsg2(pIemCpu); RTAssertPanic(); @@ -8199,7 +9413,7 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) && pIemRec->pNext) { if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE) - iemVerifyWriteRecord(pIemCpu, pIemRec); + iemVerifyWriteRecord(pIemCpu, pIemRec, fRem); pIemRec = pIemRec->pNext; } @@ -8249,13 +9463,13 @@ static void iemExecVerificationModeCheck(PIEMCPU pIemCpu) while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)) { if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE) - iemVerifyWriteRecord(pIemCpu, pIemRec); + iemVerifyWriteRecord(pIemCpu, pIemRec, fRem); pIemRec = pIemRec->pNext; } if (pIemRec != NULL) iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!"); else if (pOtherRec != NULL) - iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!"); + iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!"); } pIemCpu->CTX_SUFF(pCtx) = pOrgCtx; } @@ -8278,52 +9492,91 @@ static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, #endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */ +#ifdef LOG_ENABLED /** - * The actual code execution bits of IEMExecOne, IEMExecOneEx, and - * IEMExecOneWithPrefetchedByPC. - * - * @return Strict VBox status code. - * @param pVCpu The current virtual CPU. - * @param pIemCpu The IEM per CPU data. - * @param fExecuteInhibit If set, execute the instruction following CLI, - * POP SS and MOV SS,GR. + * Logs the current instruction. + * @param pVCpu The cross context virtual CPU structure of the caller. + * @param pCtx The current CPU context. + * @param fSameCtx Set if we have the same context information as the VMM, + * clear if we may have already executed an instruction in + * our debug context. When clear, we assume IEMCPU holds + * valid CPU mode info. */ -DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit) +static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx) { - uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); - VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]); - if (rcStrict == VINF_SUCCESS) - pIemCpu->cInstructions++; -//#ifdef DEBUG -// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr)); -//#endif - - /* Execute the next instruction as well if a cli, pop ss or - mov ss, Gr has just completed successfully. 
*/ - if ( fExecuteInhibit - && rcStrict == VINF_SUCCESS - && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) - && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip ) +# ifdef IN_RING3 + if (LogIs2Enabled()) { - rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers); - if (rcStrict == VINF_SUCCESS) + char szInstr[256]; + uint32_t cbInstr = 0; + if (fSameCtx) + DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0, + DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE, + szInstr, sizeof(szInstr), &cbInstr); + else { - b; IEM_OPCODE_GET_NEXT_U8(&b); - rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]); - if (rcStrict == VINF_SUCCESS) - pIemCpu->cInstructions++; + uint32_t fFlags = 0; + switch (pVCpu->iem.s.enmCpuMode) + { + case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break; + case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break; + case IEMMODE_16BIT: + if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM) + fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE; + else + fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE; + break; + } + DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags, + szInstr, sizeof(szInstr), &cbInstr); } - EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111)); + + Log2(("****\n" + " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" + " eip=%08x esp=%08x ebp=%08x iopl=%d\n" + " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n" + " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n" + " %s\n" + , + pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi, + pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, + pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, + pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u, + pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK, + szInstr)); + + if (LogIs3Enabled()) + DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL); } + else +# endif + LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", + pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u)); +} +#endif - /* - * Return value fiddling and statistics. - */ + +/** + * Makes status code addjustments (pass up from I/O and access handler) + * as well as maintaining statistics. + * + * @returns Strict VBox status code to pass up. + * @param pIemCpu The IEM per CPU data. + * @param rcStrict The status from executing an instruction. + */ +DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict) +{ if (rcStrict != VINF_SUCCESS) { if (RT_SUCCESS(rcStrict)) { - AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); + AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) + || rcStrict == VINF_IOM_R3_IOPORT_READ + || rcStrict == VINF_IOM_R3_IOPORT_WRITE + || rcStrict == VINF_IOM_R3_MMIO_READ + || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE + || rcStrict == VINF_IOM_R3_MMIO_WRITE + , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); int32_t const rcPassUp = pIemCpu->rcPassUp; if (rcPassUp == VINF_SUCCESS) pIemCpu->cRetInfStatuses++; @@ -8358,6 +9611,60 @@ DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, b rcStrict = pIemCpu->rcPassUp; } + return rcStrict; +} + + +/** + * The actual code execution bits of IEMExecOne, IEMExecOneEx, and + * IEMExecOneWithPrefetchedByPC. + * + * @return Strict VBox status code. + * @param pVCpu The current virtual CPU. 
+ * @param pIemCpu The IEM per CPU data. + * @param fExecuteInhibit If set, execute the instruction following CLI, + * POP SS and MOV SS,GR. + */ +DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit) +{ + uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); + VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]); + if (rcStrict == VINF_SUCCESS) + pIemCpu->cInstructions++; + if (pIemCpu->cActiveMappings > 0) + iemMemRollback(pIemCpu); +//#ifdef DEBUG +// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr)); +//#endif + + /* Execute the next instruction as well if a cli, pop ss or + mov ss, Gr has just completed successfully. */ + if ( fExecuteInhibit + && rcStrict == VINF_SUCCESS + && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) + && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip ) + { + rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers); + if (rcStrict == VINF_SUCCESS) + { +# ifdef LOG_ENABLED + iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false); +# endif + b; IEM_OPCODE_GET_NEXT_U8(&b); + rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]); + if (rcStrict == VINF_SUCCESS) + pIemCpu->cInstructions++; + if (pIemCpu->cActiveMappings > 0) + iemMemRollback(pIemCpu); + } + EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111)); + } + + /* + * Return value fiddling, statistics and sanity assertions. + */ + rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict); + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs)); Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss)); #if defined(IEM_VERIFICATION_MODE_FULL) @@ -8370,6 +9677,25 @@ DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, b } +#ifdef IN_RC +/** + * Re-enters raw-mode or ensure we return to ring-3. + * + * @returns rcStrict, maybe modified. + * @param pIemCpu The IEM CPU structure. + * @param pVCpu The cross context virtual CPU structure of the caller. + * @param pCtx The current CPU context. + * @param rcStrict The status code returne by the interpreter. + */ +DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict) +{ + if (!pIemCpu->fInPatchCode) + CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx)); + return rcStrict; +} +#endif + + /** * Execute one instruction. 
* @@ -8385,36 +9711,7 @@ VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu) #endif #ifdef LOG_ENABLED PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); -# ifdef IN_RING3 - if (LogIs2Enabled()) - { - char szInstr[256]; - uint32_t cbInstr = 0; - DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0, - DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE, - szInstr, sizeof(szInstr), &cbInstr); - - Log3(("**** " - " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" - " eip=%08x esp=%08x ebp=%08x iopl=%d\n" - " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n" - " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n" - " %s\n" - , - pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi, - pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, - pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, - pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u, - pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK, - szInstr)); - - if (LogIs3Enabled()) - DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL); - } - else -# endif - LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", - pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u)); + iemLogCurInstr(pVCpu, pCtx, true); #endif /* @@ -8430,6 +9727,9 @@ VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu) */ iemExecVerificationModeCheck(pIemCpu); #endif +#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict); +#endif if (rcStrict != VINF_SUCCESS) LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); @@ -8443,9 +9743,7 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, ui PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); - iemInitDecoder(pIemCpu, false); uint32_t const cbOldWritten = pIemCpu->cbWritten; - VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false); if (rcStrict == VINF_SUCCESS) { @@ -8453,6 +9751,10 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, ui if (pcbWritten) *pcbWritten = pIemCpu->cbWritten - cbOldWritten; } + +#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict); +#endif return rcStrict; } @@ -8479,6 +9781,10 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXC { rcStrict = iemExecOneInner(pVCpu, pIemCpu, true); } + +#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict); +#endif return rcStrict; } @@ -8489,9 +9795,7 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCo PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); - iemInitDecoder(pIemCpu, true); uint32_t const cbOldWritten = pIemCpu->cbWritten; - VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true); if (rcStrict == VINF_SUCCESS) { @@ -8499,6 +9803,10 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCo if (pcbWritten) *pcbWritten = pIemCpu->cbWritten - cbOldWritten; } + +#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict); +#endif return rcStrict; } @@ -8523,13 +9831,75 @@ VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCP rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true); if (rcStrict == VINF_SUCCESS) rcStrict = iemExecOneInner(pVCpu, pIemCpu, false); + 
+#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict); +#endif return rcStrict; } VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu) { - return IEMExecOne(pVCpu); + PIEMCPU pIemCpu = &pVCpu->iem.s; + + /* + * See if there is an interrupt pending in TRPM and inject it if we can. + */ +#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3) + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); +# ifdef IEM_VERIFICATION_MODE_FULL + pIemCpu->uInjectCpl = UINT8_MAX; +# endif + if ( pCtx->eflags.Bits.u1IF + && TRPMHasTrap(pVCpu) + && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip) + { + uint8_t u8TrapNo; + TRPMEVENT enmType; + RTGCUINT uErrCode; + RTGCPTR uCr2; + int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); + IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2); + if (!IEM_VERIFICATION_ENABLED(pIemCpu)) + TRPMResetTrap(pVCpu); + } +#else + iemExecVerificationModeSetup(pIemCpu); + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); +#endif + + /* + * Log the state. + */ +#ifdef LOG_ENABLED + iemLogCurInstr(pVCpu, pCtx, true); +#endif + + /* + * Do the decoding and emulation. + */ + VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false); + if (rcStrict == VINF_SUCCESS) + rcStrict = iemExecOneInner(pVCpu, pIemCpu, true); + +#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) + /* + * Assert some sanity. + */ + iemExecVerificationModeCheck(pIemCpu); +#endif + + /* + * Maybe re-enter raw-mode and log. + */ +#ifdef IN_RC + rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict); +#endif + if (rcStrict != VINF_SUCCESS) + LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", + pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; } @@ -8550,6 +9920,10 @@ VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu) VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2) { iemInitDecoder(&pVCpu->iem.s, false); +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx", + u8TrapNo, enmType, uErrCode, uCr2); +#endif uint32_t fFlags; switch (enmType) @@ -8630,3 +10004,240 @@ VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore) } #endif + + +/** + * Interface for HM and EM for executing string I/O OUT (write) instructions. + * + * This API ASSUMES that the caller has already verified that the guest code is + * allowed to access the I/O port. (The I/O port is in the DX register in the + * guest state.) + * + * @returns Strict VBox status code. + * @param pVCpu The cross context per virtual CPU structure. + * @param cbValue The size of the I/O port access (1, 2, or 4). + * @param enmAddrMode The addressing mode. + * @param fRepPrefix Indicates whether a repeat prefix is used + * (doesn't matter which for this instruction). + * @param cbInstr The instruction length in bytes. + * @param iEffSeg The effective segment address. + */ +VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode, + bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg) +{ + AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG); + AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH); + + /* + * State init. 
+ */ + PIEMCPU pIemCpu = &pVCpu->iem.s; + iemInitExec(pIemCpu, false /*fBypassHandlers*/); + + /* + * Switch orgy for getting to the right handler. + */ + VBOXSTRICTRC rcStrict; + if (fRepPrefix) + { + switch (enmAddrMode) + { + case IEMMODE_16BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_32BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_64BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + default: + AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE); + } + } + else + { + switch (enmAddrMode) + { + case IEMMODE_16BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_32BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_64BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + default: + AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE); + } + } + + return iemExecStatusCodeFiddling(pIemCpu, rcStrict); +} + + +/** + * Interface for HM and EM for executing string I/O IN (read) instructions. 
+ * + * This API ASSUMES that the caller has already verified that the guest code is + * allowed to access the I/O port. (The I/O port is in the DX register in the + * guest state.) + * + * @returns Strict VBox status code. + * @param pVCpu The cross context per virtual CPU structure. + * @param cbValue The size of the I/O port access (1, 2, or 4). + * @param enmAddrMode The addressing mode. + * @param fRepPrefix Indicates whether a repeat prefix is used + * (doesn't matter which for this instruction). + * @param cbInstr The instruction length in bytes. + */ +VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode, + bool fRepPrefix, uint8_t cbInstr) +{ + AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH); + + /* + * State init. + */ + PIEMCPU pIemCpu = &pVCpu->iem.s; + iemInitExec(pIemCpu, false /*fBypassHandlers*/); + + /* + * Switch orgy for getting to the right handler. + */ + VBOXSTRICTRC rcStrict; + if (fRepPrefix) + { + switch (enmAddrMode) + { + case IEMMODE_16BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_32BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_64BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + default: + AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE); + } + } + else + { + switch (enmAddrMode) + { + case IEMMODE_16BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_32BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + case IEMMODE_64BIT: + switch (cbValue) + { + case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 2: rcStrict = 
iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break; + default: + AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); + } + break; + + default: + AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE); + } + } + + return iemExecStatusCodeFiddling(pIemCpu, rcStrict); +} + diff --git a/src/VBox/VMM/VMMAll/IEMAllAImpl.asm b/src/VBox/VMM/VMMAll/IEMAllAImpl.asm index 12322571..fc0ea984 100644 --- a/src/VBox/VMM/VMMAll/IEMAllAImpl.asm +++ b/src/VBox/VMM/VMMAll/IEMAllAImpl.asm @@ -354,12 +354,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 IEM_SAVE_FLAGS A2, %3, %4 EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 - int3 - ret -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %if %2 != 0 ; locked versions requested? @@ -395,12 +390,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16 IEM_SAVE_FLAGS A2, %3, %4 EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16 - int3 - ret 8 -ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %endif ; locked %endmacro @@ -409,11 +399,11 @@ IEMIMPL_BIN_OP add, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86 IEMIMPL_BIN_OP adc, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 IEMIMPL_BIN_OP sub, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 IEMIMPL_BIN_OP sbb, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 -IEMIMPL_BIN_OP or, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF, -IEMIMPL_BIN_OP xor, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF, -IEMIMPL_BIN_OP and, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF, +IEMIMPL_BIN_OP or, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF +IEMIMPL_BIN_OP xor, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF +IEMIMPL_BIN_OP and, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF IEMIMPL_BIN_OP cmp, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 -IEMIMPL_BIN_OP test, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF, +IEMIMPL_BIN_OP test, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF ;; @@ -457,12 +447,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 IEM_SAVE_FLAGS A2, %3, %4 EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 - int3 - ret 8 -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %if %2 != 0 ; locked versions requested? @@ -490,12 +475,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16 IEM_SAVE_FLAGS A2, %3, %4 EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %else ; stub it for now - later, replace with hand coded stuff. 
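The int3 stubs being deleted here covered the 64-bit operand variants that a 32-bit host could not do in this assembly; the patch supplies C fallbacks for them in IEMAllAImplC.cpp further down, where every locked form becomes a compare-exchange retry loop (DO_LOCKED_BIN_OP_U64). A self-contained sketch of that pattern using GCC/Clang atomic builtins instead of the IPRT ASMAtomic* wrappers the real code relies on:

#include <stdint.h>

/* Sketch of the locked-update pattern: redo the plain operation against a
 * snapshot of *puDst until a 64-bit compare-exchange publishes the result.
 * The real code also recomputes EFLAGS inside the loop. */
static void locked_or_u64(volatile uint64_t *puDst, uint64_t uSrc)
{
    uint64_t uOld = __atomic_load_n(puDst, __ATOMIC_RELAXED);
    uint64_t uNew;
    do
        uNew = uOld | uSrc;                 /* the non-locked operation */
    while (!__atomic_compare_exchange_n(puDst, &uOld, uNew, 0 /*fWeak*/,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
}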
-BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16 - int3 - ret 8 -ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %endif ; locked %endmacro IEMIMPL_BIT_OP bt, 0, (X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF) @@ -551,12 +531,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 IEM_SAVE_FLAGS A2, %2, %3 EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 - int3 - ret 8 -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %endmacro IEMIMPL_BIT_OP bsf, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) IEMIMPL_BIT_OP bsr, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) @@ -585,18 +560,16 @@ BEGINPROC_FASTCALL iemAImpl_imul_two_u32, 12 EPILOGUE_3_ARGS ENDPROC iemAImpl_imul_two_u32 +%ifdef RT_ARCH_AMD64 BEGINPROC_FASTCALL iemAImpl_imul_two_u64, 16 PROLOGUE_3_ARGS -%ifdef RT_ARCH_AMD64 IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF) imul A1, qword [A0] mov [A0], A1 IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF) -%else - int3 ;; @todo implement me -%endif EPILOGUE_3_ARGS_EX 8 ENDPROC iemAImpl_imul_two_u64 +%endif ; RT_ARCH_AMD64 ; @@ -630,18 +603,15 @@ BEGINPROC_FASTCALL iemAImpl_xchg_u32, 8 EPILOGUE_2_ARGS ENDPROC iemAImpl_xchg_u32 -BEGINPROC_FASTCALL iemAImpl_xchg_u64, 8 %ifdef RT_ARCH_AMD64 +BEGINPROC_FASTCALL iemAImpl_xchg_u64, 8 PROLOGUE_2_ARGS mov T0, [A1] xchg [A0], T0 mov [A1], T0 EPILOGUE_2_ARGS -%else - int3 - ret 0 -%endif ENDPROC iemAImpl_xchg_u64 +%endif ; @@ -682,8 +652,8 @@ BEGINPROC_FASTCALL iemAImpl_xadd_u32, 12 EPILOGUE_3_ARGS ENDPROC iemAImpl_xadd_u32 -BEGINPROC_FASTCALL iemAImpl_xadd_u64, 12 %ifdef RT_ARCH_AMD64 +BEGINPROC_FASTCALL iemAImpl_xadd_u64, 12 PROLOGUE_3_ARGS IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 mov T0, [A1] @@ -691,11 +661,8 @@ BEGINPROC_FASTCALL iemAImpl_xadd_u64, 12 mov [A1], T0 IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 EPILOGUE_3_ARGS -%else - int3 - ret 4 -%endif ENDPROC iemAImpl_xadd_u64 +%endif ; RT_ARCH_AMD64 BEGINPROC_FASTCALL iemAImpl_xadd_u8_locked, 12 PROLOGUE_3_ARGS @@ -727,8 +694,8 @@ BEGINPROC_FASTCALL iemAImpl_xadd_u32_locked, 12 EPILOGUE_3_ARGS ENDPROC iemAImpl_xadd_u32_locked -BEGINPROC_FASTCALL iemAImpl_xadd_u64_locked, 12 %ifdef RT_ARCH_AMD64 +BEGINPROC_FASTCALL iemAImpl_xadd_u64_locked, 12 PROLOGUE_3_ARGS IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 mov T0, [A1] @@ -736,11 +703,8 @@ BEGINPROC_FASTCALL iemAImpl_xadd_u64_locked, 12 mov [A1], T0 IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 EPILOGUE_3_ARGS -%else - int3 - ret 4 -%endif ENDPROC iemAImpl_xadd_u64_locked +%endif ; RT_ARCH_AMD64 ; @@ -882,9 +846,9 @@ BEGINPROC_FASTCALL iemAImpl_cmpxchg_u64 %+ %2, 16 %ifdef RT_ARCH_AMD64 PROLOGUE_4_ARGS IEM_MAYBE_LOAD_FLAGS A3, (X86_EFL_ZF | X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF), 0 ; clobbers T0 (eax) - mov ax, [A1] + mov rax, [A1] %1 cmpxchg [A0], A2 - mov [A1], ax + mov [A1], rax IEM_SAVE_FLAGS A3, (X86_EFL_ZF | X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF), 0 ; clobbers T0+T1 (eax, 
r11/edi) EPILOGUE_4_ARGS %else @@ -1016,17 +980,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 8 IEM_SAVE_FLAGS A1, %2, %3 EPILOGUE_2_ARGS ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %else - ; stub them for now. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 8 - int3 - ret 0 -ENDPROC iemAImpl_ %+ %1 %+ _u64 -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 8 - int3 - ret 0 -ENDPROC iemAImpl_ %+ %1 %+ _u64_locked - %endif + %endif ; RT_ARCH_AMD64 %endmacro @@ -1036,6 +990,36 @@ IEMIMPL_UNARY_OP neg, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_E IEMIMPL_UNARY_OP not, 0, 0 +;; +; Macro for implementing memory fence operation. +; +; No return value, no operands or anything. +; +; @param 1 The instruction. +; +%macro IEMIMPL_MEM_FENCE 1 +BEGINCODE +BEGINPROC_FASTCALL iemAImpl_ %+ %1, 0 + %1 + ret +ENDPROC iemAImpl_ %+ %1 +%endmacro + +IEMIMPL_MEM_FENCE lfence +IEMIMPL_MEM_FENCE sfence +IEMIMPL_MEM_FENCE mfence + +;; +; Alternative for non-SSE2 host. +; +BEGINPROC_FASTCALL iemAImpl_alt_mem_fence, 0 + push xAX + xchg xAX, [xSP] + add xSP, xCB + ret +ENDPROC iemAImpl_alt_mem_fence + + ;; ; Macro for implementing a shift operation. @@ -1110,12 +1094,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 IEM_SAVE_FLAGS A2, %2, %3 EPILOGUE_3_ARGS ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 - int3 - ret 4 -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %endmacro @@ -1190,12 +1169,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 IEM_SAVE_FLAGS A3, %2, %3 EPILOGUE_4_ARGS_EX 12 ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 - int3 - ret 12 -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; RT_ARCH_AMD64 %endmacro @@ -1273,31 +1247,26 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16 EPILOGUE_4_ARGS ENDPROC iemAImpl_ %+ %1 %+ _u32 - %ifdef RT_ARCH_AMD64 + %ifdef RT_ARCH_AMD64 ; The 32-bit host version lives in IEMAllAImplC.cpp. BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 PROLOGUE_4_ARGS IEM_MAYBE_LOAD_FLAGS A3, %2, %3 mov rax, [A0] - %ifdef ASM_CALL64_GCC + %ifdef ASM_CALL64_GCC %1 A2 mov [A0], rax mov [A1], rdx - %else + %else mov T1, A1 %1 A2 mov [A0], rax mov [T1], rdx - %endif + %endif IEM_SAVE_FLAGS A3, %2, %3 xor eax, eax EPILOGUE_4_ARGS_EX 12 ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. -BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 - int3 - ret 12 -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; !RT_ARCH_AMD64 %endmacro @@ -1305,6 +1274,36 @@ IEMIMPL_MUL_OP mul, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_E IEMIMPL_MUL_OP imul, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF) +BEGINCODE +;; +; Worker function for negating a 32-bit number in T1:T0 +; @uses None (T0,T1) +iemAImpl_negate_T0_T1_u32: + push 0 + push 0 + xchg T0_32, [xSP] + xchg T1_32, [xSP + xCB] + sub T0_32, [xSP] + sbb T1_32, [xSP + xCB] + add xSP, xCB*2 + ret + +%ifdef RT_ARCH_AMD64 +;; +; Worker function for negating a 64-bit number in T1:T0 +; @uses None (T0,T1) +iemAImpl_negate_T0_T1_u64: + push 0 + push 0 + xchg T0, [xSP] + xchg T1, [xSP + xCB] + sub T0, [xSP] + sbb T1, [xSP + xCB] + add xSP, xCB*2 + ret +%endif + + ;; ; Macro for implementing a division operations. 
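The iemAImpl_negate_T0_T1_u32/u64 workers just above negate a double-width dividend held in a register pair with a sub/sbb sequence against zero. The same computation for the 32-bit-halves case, written as a small standalone C model rather than code from the patch:

#include <stdint.h>

/* Two's complement negation of a 64-bit value kept as two 32-bit halves:
 * low half is 0 - lo, high half is 0 - hi minus the borrow from the low. */
static void negate_u64_halves(uint32_t *pLo, uint32_t *pHi)
{
    uint32_t const lo = *pLo;
    uint32_t const hi = *pHi;
    *pLo = 0u - lo;                 /* sub */
    *pHi = 0u - hi - (lo != 0);     /* sbb: subtract the borrow */
}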
; @@ -1321,17 +1320,56 @@ IEMIMPL_MUL_OP imul, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_E ; @param 1 The instruction mnemonic. ; @param 2 The modified flags. ; @param 3 The undefined flags. +; @param 4 1 if signed, 0 if unsigned. ; ; Makes ASSUMPTIONS about A0, A1, A2, A3, T0 and T1 assignments. ; -%macro IEMIMPL_DIV_OP 3 +%macro IEMIMPL_DIV_OP 4 BEGINCODE BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12 PROLOGUE_3_ARGS + ; div by chainsaw check. test A1_8, A1_8 jz .div_zero - ;; @todo test for overflow + + ; Overflow check - unsigned division is simple to verify, haven't + ; found a simple way to check signed division yet unfortunately. + %if %4 == 0 + cmp [A0 + 1], A1_8 + jae .div_overflow + %else + mov T0_16, [A0] ; T0 = dividend + mov T1, A1 ; T1 = saved divisor (because of missing T1_8 in 32-bit) + test A1_8, A1_8 + js .divisor_negative + test T0_16, T0_16 + jns .both_positive + neg T0_16 +.one_of_each: ; OK range is 2^(result-with - 1) + (divisor - 1). + push T0 ; Start off like unsigned below. + shr T0_16, 7 + cmp T0_8, A1_8 + pop T0 + jb .div_no_overflow + ja .div_overflow + and T0_8, 0x7f ; Special case for covering (divisor - 1). + cmp T0_8, A1_8 + jae .div_overflow + jmp .div_no_overflow + +.divisor_negative: + neg A1_8 + test T0_16, T0_16 + jns .one_of_each + neg T0_16 +.both_positive: ; Same as unsigned shifted by sign indicator bit. + shr T0_16, 7 + cmp T0_8, A1_8 + jae .div_overflow +.div_no_overflow: + mov A1, T1 ; restore divisor + %endif IEM_MAYBE_LOAD_FLAGS A2, %2, %3 mov ax, [A0] @@ -1344,6 +1382,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12 EPILOGUE_3_ARGS .div_zero: +.div_overflow: mov eax, -1 jmp .return ENDPROC iemAImpl_ %+ %1 %+ _u8 @@ -1351,9 +1390,48 @@ ENDPROC iemAImpl_ %+ %1 %+ _u8 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 16 PROLOGUE_4_ARGS - test A1_16, A1_16 + ; div by chainsaw check. + test A2_16, A2_16 jz .div_zero - ;; @todo test for overflow + + ; Overflow check - unsigned division is simple to verify, haven't + ; found a simple way to check signed division yet unfortunately. + %if %4 == 0 + cmp [A1], A2_16 + jae .div_overflow + %else + mov T0_16, [A1] + shl T0_32, 16 + mov T0_16, [A0] ; T0 = dividend + mov T1, A2 ; T1 = divisor + test T1_16, T1_16 + js .divisor_negative + test T0_32, T0_32 + jns .both_positive + neg T0_32 +.one_of_each: ; OK range is 2^(result-with - 1) + (divisor - 1). + push T0 ; Start off like unsigned below. + shr T0_32, 15 + cmp T0_16, T1_16 + pop T0 + jb .div_no_overflow + ja .div_overflow + and T0_16, 0x7fff ; Special case for covering (divisor - 1). + cmp T0_16, T1_16 + jae .div_overflow + jmp .div_no_overflow + +.divisor_negative: + neg T1_16 + test T0_32, T0_32 + jns .one_of_each + neg T0_32 +.both_positive: ; Same as unsigned shifted by sign indicator bit. + shr T0_32, 15 + cmp T0_16, T1_16 + jae .div_overflow +.div_no_overflow: + %endif IEM_MAYBE_LOAD_FLAGS A3, %2, %3 %ifdef ASM_CALL64_GCC @@ -1378,6 +1456,7 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 16 EPILOGUE_4_ARGS .div_zero: +.div_overflow: mov eax, -1 jmp .return ENDPROC iemAImpl_ %+ %1 %+ _u16 @@ -1385,9 +1464,52 @@ ENDPROC iemAImpl_ %+ %1 %+ _u16 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16 PROLOGUE_4_ARGS - test A1_32, A1_32 + ; div by chainsaw check. + test A2_32, A2_32 jz .div_zero - ;; @todo test for overflow + + ; Overflow check - unsigned division is simple to verify, haven't + ; found a simple way to check signed division yet unfortunately. 
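The tricky part of the new checks is the signed case: IDIV raises #DE not only for a zero divisor but also when the quotient does not fit the destination width. For the 8-bit variant the condition the assembly establishes can be expressed in plain C by widening the division (a reference model, not the patch's code):

#include <stdint.h>
#include <stdbool.h>

/* True when IDIV r/m8 with dividend AX and the given divisor would #DE:
 * divisor zero, or quotient outside the signed 8-bit range -128..127.
 * C division truncates toward zero, matching the hardware quotient. */
static bool idiv8_would_fault(int16_t i16AX, int8_t i8Divisor)
{
    if (i8Divisor == 0)
        return true;
    int32_t iQuotient = (int32_t)i16AX / i8Divisor;   /* exact in 32 bits */
    return iQuotient < INT8_MIN || iQuotient > INT8_MAX;
}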
+ %if %4 == 0 + cmp [A1], A2_32 + jae .div_overflow + %else + push A2 ; save A2 so we modify it (we out of regs on x86). + mov T0_32, [A0] ; T0 = dividend low + mov T1_32, [A1] ; T1 = dividend high + test A2_32, A2_32 + js .divisor_negative + test T1_32, T1_32 + jns .both_positive + call iemAImpl_negate_T0_T1_u32 +.one_of_each: ; OK range is 2^(result-with - 1) + (divisor - 1). + push T0 ; Start off like unsigned below. + shl T1_32, 1 + shr T0_32, 31 + or T1_32, T0_32 + cmp T1_32, A2_32 + pop T0 + jb .div_no_overflow + ja .div_overflow + and T0_32, 0x7fffffff ; Special case for covering (divisor - 1). + cmp T0_32, A2_32 + jae .div_overflow + jmp .div_no_overflow + +.divisor_negative: + neg A2_32 + test T1_32, T1_32 + jns .one_of_each + call iemAImpl_negate_T0_T1_u32 +.both_positive: ; Same as unsigned shifted by sign indicator bit. + shl T1_32, 1 + shr T0_32, 31 + or T1_32, T0_32 + cmp T1_32, A2_32 + jae .div_overflow +.div_no_overflow: + pop A2 + %endif IEM_MAYBE_LOAD_FLAGS A3, %2, %3 mov eax, [A0] @@ -1412,57 +1534,100 @@ BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16 .return: EPILOGUE_4_ARGS +.div_overflow: + %if %4 != 0 + pop A2 + %endif .div_zero: mov eax, -1 jmp .return ENDPROC iemAImpl_ %+ %1 %+ _u32 - %ifdef RT_ARCH_AMD64 + %ifdef RT_ARCH_AMD64 ; The 32-bit host version lives in IEMAllAImplC.cpp. BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 PROLOGUE_4_ARGS - test A1, A1 + test A2, A2 jz .div_zero - ;; @todo test for overflow + %if %4 == 0 + cmp [A1], A2 + jae .div_overflow + %else + push A2 ; save A2 so we modify it (we out of regs on x86). + mov T0, [A0] ; T0 = dividend low + mov T1, [A1] ; T1 = dividend high + test A2, A2 + js .divisor_negative + test T1, T1 + jns .both_positive + call iemAImpl_negate_T0_T1_u64 +.one_of_each: ; OK range is 2^(result-with - 1) + (divisor - 1). + push T0 ; Start off like unsigned below. + shl T1, 1 + shr T0, 63 + or T1, T0 + cmp T1, A2 + pop T0 + jb .div_no_overflow + ja .div_overflow + mov T1, 0x7fffffffffffffff + and T0, T1 ; Special case for covering (divisor - 1). + cmp T0, A2 + jae .div_overflow + jmp .div_no_overflow + +.divisor_negative: + neg A2 + test T1, T1 + jns .one_of_each + call iemAImpl_negate_T0_T1_u64 +.both_positive: ; Same as unsigned shifted by sign indicator bit. + shl T1, 1 + shr T0, 63 + or T1, T0 + cmp T1, A2 + jae .div_overflow +.div_no_overflow: + pop A2 + %endif IEM_MAYBE_LOAD_FLAGS A3, %2, %3 mov rax, [A0] - %ifdef ASM_CALL64_GCC + %ifdef ASM_CALL64_GCC mov T1, A2 mov rax, [A0] mov rdx, [A1] %1 T1 mov [A0], rax mov [A1], rdx - %else + %else mov T1, A1 mov rax, [A0] mov rdx, [T1] %1 A2 mov [A0], rax mov [T1], rdx - %endif + %endif IEM_SAVE_FLAGS A3, %2, %3 xor eax, eax .return: EPILOGUE_4_ARGS_EX 12 +.div_overflow: + %if %4 != 0 + pop A2 + %endif .div_zero: mov eax, -1 jmp .return ENDPROC iemAImpl_ %+ %1 %+ _u64 - %else ; stub it for now - later, replace with hand coded stuff. 
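The unsigned pre-check really is just the single cmp/jae seen above: DIV faults exactly when the quotient needs more bits than the destination offers, which for EDX:EAX divided by a 32-bit divisor reduces to comparing the high half with the divisor. As a standalone illustration:

#include <stdbool.h>
#include <stdint.h>

/* Unsigned DIV r/m32 #DE pre-check: with dividend EDX:EAX, the quotient
 * fits in 32 bits exactly when EDX < divisor, so only the high half and
 * the divisor matter (this is the cmp/jae pair in the macro above). */
static bool div32_would_fault(uint32_t uEdx, uint32_t uDivisor)
{
    return uDivisor == 0 || uEdx >= uDivisor;
}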
-BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20 - int3 - ret -ENDPROC iemAImpl_ %+ %1 %+ _u64 - %endif ; !RT_ARCH_AMD64 + %endif ; !RT_ARCH_AMD64 %endmacro -IEMIMPL_DIV_OP div, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) -IEMIMPL_DIV_OP idiv, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) +IEMIMPL_DIV_OP div, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 +IEMIMPL_DIV_OP idiv, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 1 ; @@ -2534,3 +2699,255 @@ IEMIMPL_FPU_R80_R80 fptan IEMIMPL_FPU_R80_R80 fxtract IEMIMPL_FPU_R80_R80 fsincos + + + +;---------------------- SSE and MMX Operations ---------------------- + +;; @todo what do we need to do for MMX? +%macro IEMIMPL_MMX_PROLOGUE 0 +%endmacro +%macro IEMIMPL_MMX_EPILOGUE 0 +%endmacro + +;; @todo what do we need to do for SSE? +%macro IEMIMPL_SSE_PROLOGUE 0 +%endmacro +%macro IEMIMPL_SSE_EPILOGUE 0 +%endmacro + + +;; +; Media instruction working on two full sized registers. +; +; @param 1 The instruction +; +; @param A0 FPU context (fxsave). +; @param A1 Pointer to the first media register size operand (input/output). +; @param A2 Pointer to the second media register size operand (input). +; +%macro IEMIMPL_MEDIA_F2 1 +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 + PROLOGUE_3_ARGS + IEMIMPL_MMX_PROLOGUE + + movq mm0, [A1] + movq mm1, [A2] + %1 mm0, mm1 + movq [A1], mm0 + + IEMIMPL_MMX_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u64 + +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12 + PROLOGUE_3_ARGS + IEMIMPL_SSE_PROLOGUE + + movdqu xmm0, [A1] + movdqu xmm1, [A2] + %1 xmm0, xmm1 + movdqu [A1], xmm0 + + IEMIMPL_SSE_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u128 +%endmacro + +IEMIMPL_MEDIA_F2 pxor +IEMIMPL_MEDIA_F2 pcmpeqb +IEMIMPL_MEDIA_F2 pcmpeqw +IEMIMPL_MEDIA_F2 pcmpeqd + + +;; +; Media instruction working on one full sized and one half sized register (lower half). +; +; @param 1 The instruction +; @param 2 1 if MMX is included, 0 if not. +; +; @param A0 FPU context (fxsave). +; @param A1 Pointer to the first full sized media register operand (input/output). +; @param A2 Pointer to the second half sized media register operand (input). +; +%macro IEMIMPL_MEDIA_F1L1 2 + %if %2 != 0 +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 + PROLOGUE_3_ARGS + IEMIMPL_MMX_PROLOGUE + + movq mm0, [A1] + movd mm1, [A2] + %1 mm0, mm1 + movq [A1], mm0 + + IEMIMPL_MMX_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u64 + %endif + +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12 + PROLOGUE_3_ARGS + IEMIMPL_SSE_PROLOGUE + + movdqu xmm0, [A1] + movq xmm1, [A2] + %1 xmm0, xmm1 + movdqu [A1], xmm0 + + IEMIMPL_SSE_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u128 +%endmacro + +IEMIMPL_MEDIA_F1L1 punpcklbw, 1 +IEMIMPL_MEDIA_F1L1 punpcklwd, 1 +IEMIMPL_MEDIA_F1L1 punpckldq, 1 +IEMIMPL_MEDIA_F1L1 punpcklqdq, 0 + + +;; +; Media instruction working on one full sized and one half sized register (high half). +; +; @param 1 The instruction +; @param 2 1 if MMX is included, 0 if not. +; +; @param A0 FPU context (fxsave). +; @param A1 Pointer to the first full sized media register operand (input/output). +; @param A2 Pointer to the second full sized media register operand, where we +; will only use the upper half (input). 
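The media helpers above simply load the guest operands into host MMX/XMM registers and execute the very instruction being emulated. For reference, what punpcklbw computes on a 64-bit operand, as a small C model (assuming little-endian byte order, i.e. the x86 case):

#include <stdint.h>
#include <string.h>

/* Reference model of punpcklbw mm0, mm1 on a 64-bit operand: interleave
 * the four low bytes of destination and source, destination byte first.
 * Byte i of the arrays is bits 8*i..8*i+7, little-endian as on x86. */
static void punpcklbw_u64(uint64_t *puDst, uint64_t const *puSrc)
{
    uint8_t abDst[8], abSrc[8], abRes[8];
    memcpy(abDst, puDst, sizeof(abDst));
    memcpy(abSrc, puSrc, sizeof(abSrc));
    for (unsigned i = 0; i < 4; i++)
    {
        abRes[2 * i]     = abDst[i];
        abRes[2 * i + 1] = abSrc[i];
    }
    memcpy(puDst, abRes, sizeof(abRes));
}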
+; +%macro IEMIMPL_MEDIA_F1H1 2 + %if %2 != 0 +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 + PROLOGUE_3_ARGS + IEMIMPL_MMX_PROLOGUE + + movq mm0, [A1] + movq mm1, [A2] + %1 mm0, mm1 + movq [A1], mm0 + + IEMIMPL_MMX_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u64 + %endif + +BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12 + PROLOGUE_3_ARGS + IEMIMPL_SSE_PROLOGUE + + movdqu xmm0, [A1] + movdqu xmm1, [A2] + %1 xmm0, xmm1 + movdqu [A1], xmm0 + + IEMIMPL_SSE_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_ %+ %1 %+ _u128 +%endmacro + +IEMIMPL_MEDIA_F1L1 punpckhbw, 1 +IEMIMPL_MEDIA_F1L1 punpckhwd, 1 +IEMIMPL_MEDIA_F1L1 punpckhdq, 1 +IEMIMPL_MEDIA_F1L1 punpckhqdq, 0 + + +; +; Shufflers with evil 8-bit immediates. +; + +BEGINPROC_FASTCALL iemAImpl_pshufw, 16 + PROLOGUE_4_ARGS + IEMIMPL_MMX_PROLOGUE + + movq mm0, [A1] + movq mm1, [A2] + lea T0, [A3 + A3*4] ; sizeof(pshufw+ret) == 5 + lea T1, [.imm0 xWrtRIP] + lea T1, [T1 + T0] + call T1 + movq [A1], mm0 + + IEMIMPL_MMX_EPILOGUE + EPILOGUE_4_ARGS +%assign bImm 0 +%rep 256 +.imm %+ bImm: + pshufw mm0, mm1, bImm + ret + %assign bImm bImm + 1 +%endrep +.immEnd: ; 256*5 == 0x500 +dw 0xfaff + (.immEnd - .imm0) ; will cause warning if entries are too big. +dw 0x104ff - (.immEnd - .imm0) ; will cause warning if entries are small big. +ENDPROC iemAImpl_pshufw + + +%macro IEMIMPL_MEDIA_SSE_PSHUFXX 1 +BEGINPROC_FASTCALL iemAImpl_ %+ %1, 16 + PROLOGUE_4_ARGS + IEMIMPL_SSE_PROLOGUE + + movdqu xmm0, [A1] + movdqu xmm1, [A2] + lea T1, [.imm0 xWrtRIP] + lea T0, [A3 + A3*2] ; sizeof(pshufXX+ret) == 6: (A3 * 3) *2 + lea T1, [T1 + T0*2] + call T1 + movdqu [A1], xmm0 + + IEMIMPL_SSE_EPILOGUE + EPILOGUE_4_ARGS + %assign bImm 0 + %rep 256 +.imm %+ bImm: + %1 xmm0, xmm1, bImm + ret + %assign bImm bImm + 1 + %endrep +.immEnd: ; 256*6 == 0x600 +dw 0xf9ff + (.immEnd - .imm0) ; will cause warning if entries are too big. +dw 0x105ff - (.immEnd - .imm0) ; will cause warning if entries are small big. +ENDPROC iemAImpl_ %+ %1 +%endmacro + +IEMIMPL_MEDIA_SSE_PSHUFXX pshufhw +IEMIMPL_MEDIA_SSE_PSHUFXX pshuflw +IEMIMPL_MEDIA_SSE_PSHUFXX pshufd + + +; +; Move byte mask. +; + +BEGINPROC_FASTCALL iemAImpl_pmovmskb_u64, 12 + PROLOGUE_3_ARGS + IEMIMPL_MMX_PROLOGUE + + mov T0, [A1] + movq mm1, [A2] + pmovmskb T0, mm1 + mov [A1], T0 +%ifdef RT_ARCH_X86 + mov dword [A1 + 4], 0 +%endif + IEMIMPL_MMX_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_pmovmskb_u64 + +BEGINPROC_FASTCALL iemAImpl_pmovmskb_u128, 12 + PROLOGUE_3_ARGS + IEMIMPL_SSE_PROLOGUE + + mov T0, [A1] + movdqu xmm1, [A2] + pmovmskb T0, xmm1 + mov [A1], T0 +%ifdef RT_ARCH_X86 + mov dword [A1 + 4], 0 +%endif + IEMIMPL_SSE_EPILOGUE + EPILOGUE_3_ARGS +ENDPROC iemAImpl_pmovmskb_u128 + diff --git a/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp b/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp index 3c3ebc19..1391f685 100644 --- a/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp +++ b/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2011 Oracle Corporation + * Copyright (C) 2011-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -22,27 +22,1160 @@ #include <VBox/vmm/vm.h> #include <iprt/x86.h> -#if 0 +/******************************************************************************* +* Global Variables * +*******************************************************************************/ +/** + * Parity calculation table. 
+ * + * The generator code: + * @code + * #include <stdio.h> + * + * int main() + * { + * unsigned b; + * for (b = 0; b < 256; b++) + * { + * int cOnes = ( b & 1) + * + ((b >> 1) & 1) + * + ((b >> 2) & 1) + * + ((b >> 3) & 1) + * + ((b >> 4) & 1) + * + ((b >> 5) & 1) + * + ((b >> 6) & 1) + * + ((b >> 7) & 1); + * printf(" /" "* %#04x = %u%u%u%u%u%u%u%ub *" "/ %s,\n", + * b, + * (b >> 7) & 1, + * (b >> 6) & 1, + * (b >> 5) & 1, + * (b >> 4) & 1, + * (b >> 3) & 1, + * (b >> 2) & 1, + * (b >> 1) & 1, + * b & 1, + * cOnes & 1 ? "0" : "X86_EFL_PF"); + * } + * return 0; + * } + * @endcode + */ +static uint8_t const g_afParity[256] = +{ + /* 0000 = 00000000b */ X86_EFL_PF, + /* 0x01 = 00000001b */ 0, + /* 0x02 = 00000010b */ 0, + /* 0x03 = 00000011b */ X86_EFL_PF, + /* 0x04 = 00000100b */ 0, + /* 0x05 = 00000101b */ X86_EFL_PF, + /* 0x06 = 00000110b */ X86_EFL_PF, + /* 0x07 = 00000111b */ 0, + /* 0x08 = 00001000b */ 0, + /* 0x09 = 00001001b */ X86_EFL_PF, + /* 0x0a = 00001010b */ X86_EFL_PF, + /* 0x0b = 00001011b */ 0, + /* 0x0c = 00001100b */ X86_EFL_PF, + /* 0x0d = 00001101b */ 0, + /* 0x0e = 00001110b */ 0, + /* 0x0f = 00001111b */ X86_EFL_PF, + /* 0x10 = 00010000b */ 0, + /* 0x11 = 00010001b */ X86_EFL_PF, + /* 0x12 = 00010010b */ X86_EFL_PF, + /* 0x13 = 00010011b */ 0, + /* 0x14 = 00010100b */ X86_EFL_PF, + /* 0x15 = 00010101b */ 0, + /* 0x16 = 00010110b */ 0, + /* 0x17 = 00010111b */ X86_EFL_PF, + /* 0x18 = 00011000b */ X86_EFL_PF, + /* 0x19 = 00011001b */ 0, + /* 0x1a = 00011010b */ 0, + /* 0x1b = 00011011b */ X86_EFL_PF, + /* 0x1c = 00011100b */ 0, + /* 0x1d = 00011101b */ X86_EFL_PF, + /* 0x1e = 00011110b */ X86_EFL_PF, + /* 0x1f = 00011111b */ 0, + /* 0x20 = 00100000b */ 0, + /* 0x21 = 00100001b */ X86_EFL_PF, + /* 0x22 = 00100010b */ X86_EFL_PF, + /* 0x23 = 00100011b */ 0, + /* 0x24 = 00100100b */ X86_EFL_PF, + /* 0x25 = 00100101b */ 0, + /* 0x26 = 00100110b */ 0, + /* 0x27 = 00100111b */ X86_EFL_PF, + /* 0x28 = 00101000b */ X86_EFL_PF, + /* 0x29 = 00101001b */ 0, + /* 0x2a = 00101010b */ 0, + /* 0x2b = 00101011b */ X86_EFL_PF, + /* 0x2c = 00101100b */ 0, + /* 0x2d = 00101101b */ X86_EFL_PF, + /* 0x2e = 00101110b */ X86_EFL_PF, + /* 0x2f = 00101111b */ 0, + /* 0x30 = 00110000b */ X86_EFL_PF, + /* 0x31 = 00110001b */ 0, + /* 0x32 = 00110010b */ 0, + /* 0x33 = 00110011b */ X86_EFL_PF, + /* 0x34 = 00110100b */ 0, + /* 0x35 = 00110101b */ X86_EFL_PF, + /* 0x36 = 00110110b */ X86_EFL_PF, + /* 0x37 = 00110111b */ 0, + /* 0x38 = 00111000b */ 0, + /* 0x39 = 00111001b */ X86_EFL_PF, + /* 0x3a = 00111010b */ X86_EFL_PF, + /* 0x3b = 00111011b */ 0, + /* 0x3c = 00111100b */ X86_EFL_PF, + /* 0x3d = 00111101b */ 0, + /* 0x3e = 00111110b */ 0, + /* 0x3f = 00111111b */ X86_EFL_PF, + /* 0x40 = 01000000b */ 0, + /* 0x41 = 01000001b */ X86_EFL_PF, + /* 0x42 = 01000010b */ X86_EFL_PF, + /* 0x43 = 01000011b */ 0, + /* 0x44 = 01000100b */ X86_EFL_PF, + /* 0x45 = 01000101b */ 0, + /* 0x46 = 01000110b */ 0, + /* 0x47 = 01000111b */ X86_EFL_PF, + /* 0x48 = 01001000b */ X86_EFL_PF, + /* 0x49 = 01001001b */ 0, + /* 0x4a = 01001010b */ 0, + /* 0x4b = 01001011b */ X86_EFL_PF, + /* 0x4c = 01001100b */ 0, + /* 0x4d = 01001101b */ X86_EFL_PF, + /* 0x4e = 01001110b */ X86_EFL_PF, + /* 0x4f = 01001111b */ 0, + /* 0x50 = 01010000b */ X86_EFL_PF, + /* 0x51 = 01010001b */ 0, + /* 0x52 = 01010010b */ 0, + /* 0x53 = 01010011b */ X86_EFL_PF, + /* 0x54 = 01010100b */ 0, + /* 0x55 = 01010101b */ X86_EFL_PF, + /* 0x56 = 01010110b */ X86_EFL_PF, + /* 0x57 = 01010111b */ 0, + /* 0x58 = 01011000b */ 0, + /* 0x59 = 01011001b */ 
X86_EFL_PF, + /* 0x5a = 01011010b */ X86_EFL_PF, + /* 0x5b = 01011011b */ 0, + /* 0x5c = 01011100b */ X86_EFL_PF, + /* 0x5d = 01011101b */ 0, + /* 0x5e = 01011110b */ 0, + /* 0x5f = 01011111b */ X86_EFL_PF, + /* 0x60 = 01100000b */ X86_EFL_PF, + /* 0x61 = 01100001b */ 0, + /* 0x62 = 01100010b */ 0, + /* 0x63 = 01100011b */ X86_EFL_PF, + /* 0x64 = 01100100b */ 0, + /* 0x65 = 01100101b */ X86_EFL_PF, + /* 0x66 = 01100110b */ X86_EFL_PF, + /* 0x67 = 01100111b */ 0, + /* 0x68 = 01101000b */ 0, + /* 0x69 = 01101001b */ X86_EFL_PF, + /* 0x6a = 01101010b */ X86_EFL_PF, + /* 0x6b = 01101011b */ 0, + /* 0x6c = 01101100b */ X86_EFL_PF, + /* 0x6d = 01101101b */ 0, + /* 0x6e = 01101110b */ 0, + /* 0x6f = 01101111b */ X86_EFL_PF, + /* 0x70 = 01110000b */ 0, + /* 0x71 = 01110001b */ X86_EFL_PF, + /* 0x72 = 01110010b */ X86_EFL_PF, + /* 0x73 = 01110011b */ 0, + /* 0x74 = 01110100b */ X86_EFL_PF, + /* 0x75 = 01110101b */ 0, + /* 0x76 = 01110110b */ 0, + /* 0x77 = 01110111b */ X86_EFL_PF, + /* 0x78 = 01111000b */ X86_EFL_PF, + /* 0x79 = 01111001b */ 0, + /* 0x7a = 01111010b */ 0, + /* 0x7b = 01111011b */ X86_EFL_PF, + /* 0x7c = 01111100b */ 0, + /* 0x7d = 01111101b */ X86_EFL_PF, + /* 0x7e = 01111110b */ X86_EFL_PF, + /* 0x7f = 01111111b */ 0, + /* 0x80 = 10000000b */ 0, + /* 0x81 = 10000001b */ X86_EFL_PF, + /* 0x82 = 10000010b */ X86_EFL_PF, + /* 0x83 = 10000011b */ 0, + /* 0x84 = 10000100b */ X86_EFL_PF, + /* 0x85 = 10000101b */ 0, + /* 0x86 = 10000110b */ 0, + /* 0x87 = 10000111b */ X86_EFL_PF, + /* 0x88 = 10001000b */ X86_EFL_PF, + /* 0x89 = 10001001b */ 0, + /* 0x8a = 10001010b */ 0, + /* 0x8b = 10001011b */ X86_EFL_PF, + /* 0x8c = 10001100b */ 0, + /* 0x8d = 10001101b */ X86_EFL_PF, + /* 0x8e = 10001110b */ X86_EFL_PF, + /* 0x8f = 10001111b */ 0, + /* 0x90 = 10010000b */ X86_EFL_PF, + /* 0x91 = 10010001b */ 0, + /* 0x92 = 10010010b */ 0, + /* 0x93 = 10010011b */ X86_EFL_PF, + /* 0x94 = 10010100b */ 0, + /* 0x95 = 10010101b */ X86_EFL_PF, + /* 0x96 = 10010110b */ X86_EFL_PF, + /* 0x97 = 10010111b */ 0, + /* 0x98 = 10011000b */ 0, + /* 0x99 = 10011001b */ X86_EFL_PF, + /* 0x9a = 10011010b */ X86_EFL_PF, + /* 0x9b = 10011011b */ 0, + /* 0x9c = 10011100b */ X86_EFL_PF, + /* 0x9d = 10011101b */ 0, + /* 0x9e = 10011110b */ 0, + /* 0x9f = 10011111b */ X86_EFL_PF, + /* 0xa0 = 10100000b */ X86_EFL_PF, + /* 0xa1 = 10100001b */ 0, + /* 0xa2 = 10100010b */ 0, + /* 0xa3 = 10100011b */ X86_EFL_PF, + /* 0xa4 = 10100100b */ 0, + /* 0xa5 = 10100101b */ X86_EFL_PF, + /* 0xa6 = 10100110b */ X86_EFL_PF, + /* 0xa7 = 10100111b */ 0, + /* 0xa8 = 10101000b */ 0, + /* 0xa9 = 10101001b */ X86_EFL_PF, + /* 0xaa = 10101010b */ X86_EFL_PF, + /* 0xab = 10101011b */ 0, + /* 0xac = 10101100b */ X86_EFL_PF, + /* 0xad = 10101101b */ 0, + /* 0xae = 10101110b */ 0, + /* 0xaf = 10101111b */ X86_EFL_PF, + /* 0xb0 = 10110000b */ 0, + /* 0xb1 = 10110001b */ X86_EFL_PF, + /* 0xb2 = 10110010b */ X86_EFL_PF, + /* 0xb3 = 10110011b */ 0, + /* 0xb4 = 10110100b */ X86_EFL_PF, + /* 0xb5 = 10110101b */ 0, + /* 0xb6 = 10110110b */ 0, + /* 0xb7 = 10110111b */ X86_EFL_PF, + /* 0xb8 = 10111000b */ X86_EFL_PF, + /* 0xb9 = 10111001b */ 0, + /* 0xba = 10111010b */ 0, + /* 0xbb = 10111011b */ X86_EFL_PF, + /* 0xbc = 10111100b */ 0, + /* 0xbd = 10111101b */ X86_EFL_PF, + /* 0xbe = 10111110b */ X86_EFL_PF, + /* 0xbf = 10111111b */ 0, + /* 0xc0 = 11000000b */ X86_EFL_PF, + /* 0xc1 = 11000001b */ 0, + /* 0xc2 = 11000010b */ 0, + /* 0xc3 = 11000011b */ X86_EFL_PF, + /* 0xc4 = 11000100b */ 0, + /* 0xc5 = 11000101b */ X86_EFL_PF, + /* 0xc6 = 11000110b */ 
X86_EFL_PF, + /* 0xc7 = 11000111b */ 0, + /* 0xc8 = 11001000b */ 0, + /* 0xc9 = 11001001b */ X86_EFL_PF, + /* 0xca = 11001010b */ X86_EFL_PF, + /* 0xcb = 11001011b */ 0, + /* 0xcc = 11001100b */ X86_EFL_PF, + /* 0xcd = 11001101b */ 0, + /* 0xce = 11001110b */ 0, + /* 0xcf = 11001111b */ X86_EFL_PF, + /* 0xd0 = 11010000b */ 0, + /* 0xd1 = 11010001b */ X86_EFL_PF, + /* 0xd2 = 11010010b */ X86_EFL_PF, + /* 0xd3 = 11010011b */ 0, + /* 0xd4 = 11010100b */ X86_EFL_PF, + /* 0xd5 = 11010101b */ 0, + /* 0xd6 = 11010110b */ 0, + /* 0xd7 = 11010111b */ X86_EFL_PF, + /* 0xd8 = 11011000b */ X86_EFL_PF, + /* 0xd9 = 11011001b */ 0, + /* 0xda = 11011010b */ 0, + /* 0xdb = 11011011b */ X86_EFL_PF, + /* 0xdc = 11011100b */ 0, + /* 0xdd = 11011101b */ X86_EFL_PF, + /* 0xde = 11011110b */ X86_EFL_PF, + /* 0xdf = 11011111b */ 0, + /* 0xe0 = 11100000b */ 0, + /* 0xe1 = 11100001b */ X86_EFL_PF, + /* 0xe2 = 11100010b */ X86_EFL_PF, + /* 0xe3 = 11100011b */ 0, + /* 0xe4 = 11100100b */ X86_EFL_PF, + /* 0xe5 = 11100101b */ 0, + /* 0xe6 = 11100110b */ 0, + /* 0xe7 = 11100111b */ X86_EFL_PF, + /* 0xe8 = 11101000b */ X86_EFL_PF, + /* 0xe9 = 11101001b */ 0, + /* 0xea = 11101010b */ 0, + /* 0xeb = 11101011b */ X86_EFL_PF, + /* 0xec = 11101100b */ 0, + /* 0xed = 11101101b */ X86_EFL_PF, + /* 0xee = 11101110b */ X86_EFL_PF, + /* 0xef = 11101111b */ 0, + /* 0xf0 = 11110000b */ X86_EFL_PF, + /* 0xf1 = 11110001b */ 0, + /* 0xf2 = 11110010b */ 0, + /* 0xf3 = 11110011b */ X86_EFL_PF, + /* 0xf4 = 11110100b */ 0, + /* 0xf5 = 11110101b */ X86_EFL_PF, + /* 0xf6 = 11110110b */ X86_EFL_PF, + /* 0xf7 = 11110111b */ 0, + /* 0xf8 = 11111000b */ 0, + /* 0xf9 = 11111001b */ X86_EFL_PF, + /* 0xfa = 11111010b */ X86_EFL_PF, + /* 0xfb = 11111011b */ 0, + /* 0xfc = 11111100b */ X86_EFL_PF, + /* 0xfd = 11111101b */ 0, + /* 0xfe = 11111110b */ 0, + /* 0xff = 11111111b */ X86_EFL_PF, +}; + + +/** + * Calculates the signed flag value given a result and it's bit width. + * + * The signed flag (SF) is a duplication of the most significant bit in the + * result. + * + * @returns X86_EFL_SF or 0. + * @param a_uResult Unsigned result value. + * @param a_cBitsWidth The width of the result (8, 16, 32, 64). + */ +#define X86_EFL_CALC_SF(a_uResult, a_cBitsWidth) \ + ( (uint32_t)((a_uResult) >> ((a_cBitsWidth) - X86_EFL_SF_BIT)) & X86_EFL_SF ) + +/** + * Calculates the zero flag value given a result. + * + * The zero flag (ZF) indicates whether the result is zero or not. + * + * @returns X86_EFL_ZF or 0. + * @param a_uResult Unsigned result value. + */ +#define X86_EFL_CALC_ZF(a_uResult) \ + ( (uint32_t)((a_uResult) == 0) << X86_EFL_ZF_BIT ) + +/** + * Updates the status bits (CF, PF, AF, ZF, SF, and OF) after a logical op. + * + * CF and OF are defined to be 0 by logical operations. AF on the other hand is + * undefined. We do not set AF, as that seems to make the most sense (which + * probably makes it the most wrong in real life). + * + * @returns Status bits. + * @param a_pfEFlags Pointer to the 32-bit EFLAGS value to update. + * @param a_uResult Unsigned result value. + * @param a_cBitsWidth The width of the result (8, 16, 32, 64). + * @param a_fExtra Additional bits to set. 
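Taken together, the parity table and the helper macros above amount to a handful of bit operations per result. A standalone model of the logical-op status update for a 64-bit result, computing PF on the fly instead of indexing g_afParity[] (the MY_EFL_* constants below merely mirror the EFLAGS bit layout and are not the VBox definitions):

#include <stdint.h>

#define MY_EFL_CF   (1u << 0)
#define MY_EFL_PF   (1u << 2)
#define MY_EFL_AF   (1u << 4)
#define MY_EFL_ZF   (1u << 6)
#define MY_EFL_SF   (1u << 7)
#define MY_EFL_OF   (1u << 11)

/* After AND/OR/XOR/TEST: CF, OF and AF cleared, PF from the low result
 * byte (set on an even number of 1 bits), ZF and SF from the full result. */
static uint32_t efl_after_logic_u64(uint32_t fEfl, uint64_t uResult)
{
    uint8_t bPar = (uint8_t)uResult;
    bPar ^= bPar >> 4;
    bPar ^= bPar >> 2;
    bPar ^= bPar >> 1;                              /* bit 0 = odd parity */
    fEfl &= ~(MY_EFL_CF | MY_EFL_PF | MY_EFL_AF | MY_EFL_ZF | MY_EFL_SF | MY_EFL_OF);
    fEfl |= (bPar & 1) ? 0u : MY_EFL_PF;
    fEfl |= uResult == 0 ? MY_EFL_ZF : 0u;
    fEfl |= (uint32_t)(uResult >> 63) << 7;         /* SF = sign bit of result */
    return fEfl;
}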
+ */ +#define IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(a_pfEFlags, a_uResult, a_cBitsWidth, a_fExtra) \ + do { \ + uint32_t fEflTmp = *(a_pfEFlags); \ + fEflTmp &= ~X86_EFL_STATUS_BITS; \ + fEflTmp |= g_afParity[(a_uResult) & 0xff]; \ + fEflTmp |= X86_EFL_CALC_ZF(a_uResult); \ + fEflTmp |= X86_EFL_CALC_SF(a_uResult, a_cBitsWidth); \ + fEflTmp |= (a_fExtra); \ + *(a_pfEFlags) = fEflTmp; \ + } while (0) + + +#ifdef RT_ARCH_X86 +/* + * There are a few 64-bit on 32-bit things we'd rather do in C. Actually, doing + * it all in C is probably safer atm., optimize what's necessary later, maybe. + */ + + +/* Binary ops */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_add_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = uDst + uSrc; + *puDst = uResult; + + /* Calc EFLAGS. */ + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uResult < uDst) << X86_EFL_CF_BIT; + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uSrc ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= (((uDst ^ uSrc ^ RT_BIT_64(63)) & (uResult ^ uDst)) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_adc_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + if (!(*pfEFlags & X86_EFL_CF)) + iemAImpl_add_u64(puDst, uSrc, pfEFlags); + else + { + uint64_t uDst = *puDst; + uint64_t uResult = uDst + uSrc + 1; + *puDst = uResult; + + /* Calc EFLAGS. */ + /** @todo verify AF and OF calculations. */ + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uResult <= uDst) << X86_EFL_CF_BIT; + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uSrc ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= (((uDst ^ uSrc ^ RT_BIT_64(63)) & (uResult ^ uDst)) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_sub_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = uDst - uSrc; + *puDst = uResult; + + /* Calc EFLAGS. */ + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst < uSrc) << X86_EFL_CF_BIT; + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uSrc ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= (((uDst ^ uSrc) & (uResult ^ uDst)) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_sbb_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + if (!(*pfEFlags & X86_EFL_CF)) + iemAImpl_sub_u64(puDst, uSrc, pfEFlags); + else + { + uint64_t uDst = *puDst; + uint64_t uResult = uDst - uSrc - 1; + *puDst = uResult; + + /* Calc EFLAGS. */ + /** @todo verify AF and OF calculations. 
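The OF expression used by these add/adc/sub/sbb fallbacks is the usual sign-bit trick. A small standalone check that, for addition, it agrees with the textbook definition of signed overflow:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Signed-overflow test for a 64-bit addition, written both as the bit
 * trick from the patch and as the plain definition; the two always agree. */
static bool add_u64_overflows(uint64_t uDst, uint64_t uSrc)
{
    uint64_t const uResult = uDst + uSrc;
    bool const fTrick = (((uDst ^ uSrc ^ (UINT64_C(1) << 63)) & (uResult ^ uDst)) >> 63) != 0;
    bool const fPlain = ((~(uDst ^ uSrc) & (uDst ^ uResult)) >> 63) != 0;
    assert(fTrick == fPlain);
    return fTrick;
}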
*/ + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst <= uSrc) << X86_EFL_CF_BIT; + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uSrc ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= (((uDst ^ uSrc) & (uResult ^ uDst)) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_or_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uResult = *puDst | uSrc; + *puDst = uResult; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_xor_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uResult = *puDst ^ uSrc; + *puDst = uResult; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_and_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uResult = *puDst & uSrc; + *puDst = uResult; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_cmp_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uDstTmp = *puDst; + iemAImpl_sub_u64(&uDstTmp, uSrc, pfEFlags); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_test_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + uint64_t uResult = *puDst & uSrc; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); +} + + +/** 64-bit locked binary operand operation. */ +# define DO_LOCKED_BIN_OP_U64(a_Mnemonic) \ + do { \ + uint64_t uOld = ASMAtomicReadU64(puDst); \ + uint64_t uTmp; \ + uint32_t fEflTmp; \ + do \ + { \ + uTmp = uOld; \ + fEflTmp = *pfEFlags; \ + iemAImpl_ ## a_Mnemonic ## _u64(&uTmp, uSrc, &fEflTmp); \ + } while (ASMAtomicCmpXchgExU64(puDst, uTmp, uOld, &uOld)); \ + *pfEFlags = fEflTmp; \ + } while (0) + + +IEM_DECL_IMPL_DEF(void, iemAImpl_add_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(adc); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_adc_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(adc); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_sub_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(sub); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_sbb_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(sbb); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_or_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(or); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_xor_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(xor); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_and_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(and); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *puDst, uint64_t *puReg, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = uDst; + iemAImpl_add_u64(&uResult, *puReg, pfEFlags); + *puDst = uResult; + *puReg = uDst; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *puDst, uint64_t *puReg, uint32_t *pfEFlags)) +{ + uint64_t uOld = ASMAtomicReadU64(puDst); + uint64_t uTmpDst; + uint32_t fEflTmp; + do + { + uTmpDst = uOld; + fEflTmp = *pfEFlags; + iemAImpl_add_u64(&uTmpDst, *puReg, pfEFlags); + } while (ASMAtomicCmpXchgExU64(puDst, uTmpDst, uOld, &uOld)); + *puReg = uOld; + *pfEFlags = fEflTmp; +} + + +/* Bit operations (same 
signature as above). */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_bt_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, ZF, AF, PF. We set them as after an + logical operation (AND/OR/whatever). */ + Assert(uSrc < 64); + uint64_t uDst = *puDst; + if (uDst & RT_BIT_64(uSrc)) + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, X86_EFL_CF); + else + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, 0); +} + +IEM_DECL_IMPL_DEF(void, iemAImpl_btc_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, ZF, AF, PF. We set them as after an + logical operation (AND/OR/whatever). */ + Assert(uSrc < 64); + uint64_t fMask = RT_BIT_64(uSrc); + uint64_t uDst = *puDst; + if (uDst & fMask) + { + uDst &= ~fMask; + *puDst = uDst; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, X86_EFL_CF); + } + else + { + uDst |= fMask; + *puDst = uDst; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, 0); + } +} + +IEM_DECL_IMPL_DEF(void, iemAImpl_btr_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, ZF, AF, PF. We set them as after an + logical operation (AND/OR/whatever). */ + Assert(uSrc < 64); + uint64_t fMask = RT_BIT_64(uSrc); + uint64_t uDst = *puDst; + if (uDst & fMask) + { + uDst &= ~fMask; + *puDst = uDst; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, X86_EFL_CF); + } + else + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, 0); +} + +IEM_DECL_IMPL_DEF(void, iemAImpl_bts_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, ZF, AF, PF. We set them as after an + logical operation (AND/OR/whatever). */ + Assert(uSrc < 64); + uint64_t fMask = RT_BIT_64(uSrc); + uint64_t uDst = *puDst; + if (uDst & fMask) + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, X86_EFL_CF); + else + { + uDst |= fMask; + *puDst = uDst; + IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uDst, 64, 0); + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_btc_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(btc); +} -IEM_DECL_IMPL_DEF(void, iemImpl_add_u8,(uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags)) +IEM_DECL_IMPL_DEF(void, iemAImpl_btr_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) { - /* incorrect sketch (testing fastcall + gcc) */ - uint8_t u8Dst = *pu8Dst; - uint8_t u8Res = u8Dst + u8Src; - *pu8Dst = u8Res; + DO_LOCKED_BIN_OP_U64(btr); +} + +IEM_DECL_IMPL_DEF(void, iemAImpl_bts_u64_locked,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + DO_LOCKED_BIN_OP_U64(bts); +} + + +/* bit scan */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_bsf_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, AF, PF, CF. */ + /** @todo check what real CPUs does. 
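The bsf_u64/bsr_u64 fallbacks that follow narrow the word down by halves; behaviourally, the bit-scan-forward case boils down to the loop below (ZF is the only defined flag result, and the fallback leaves the destination untouched for a zero source):

#include <stdbool.h>
#include <stdint.h>

/* Behavioural model of the BSF fallback: returns false (ZF set) for a zero
 * source without touching *piBit, otherwise stores the index of the least
 * significant set bit and returns true (ZF clear). */
static bool bsf64_model(uint64_t uSrc, unsigned *piBit)
{
    if (!uSrc)
        return false;
    unsigned iBit = 0;
    while (!(uSrc & 1))
    {
        uSrc >>= 1;
        iBit++;
    }
    *piBit = iBit;
    return true;
}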
*/ + if (uSrc) + { + uint8_t iBit; + uint32_t u32Src; + if (uSrc & UINT32_MAX) + { + iBit = 0; + u32Src = uSrc; + } + else + { + iBit = 32; + u32Src = uSrc >> 32; + } + if (!(u32Src & UINT16_MAX)) + { + iBit += 16; + u32Src >>= 16; + } + if (!(u32Src & UINT8_MAX)) + { + iBit += 8; + u32Src >>= 8; + } + if (!(u32Src & 0xf)) + { + iBit += 4; + u32Src >>= 4; + } + if (!(u32Src & 0x3)) + { + iBit += 2; + u32Src >>= 2; + } + if (!(u32Src & 1)) + { + iBit += 1; + Assert(u32Src & 2); + } - if (u8Res) - *pEFlags &= X86_EFL_ZF; + *puDst = iBit; + *pfEFlags &= ~X86_EFL_ZF; + } else - *pEFlags |= X86_EFL_ZF; + *pfEFlags |= X86_EFL_ZF; +} + +IEM_DECL_IMPL_DEF(void, iemAImpl_bsr_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + /* Note! "undefined" flags: OF, SF, AF, PF, CF. */ + /** @todo check what real CPUs does. */ + if (uSrc) + { + uint8_t iBit; + uint32_t u32Src; + if (uSrc & UINT64_C(0xffffffff00000000)) + { + iBit = 64; + u32Src = uSrc >> 32; + } + else + { + iBit = 32; + u32Src = uSrc; + } + if (!(u32Src & UINT32_C(0xffff0000))) + { + iBit -= 16; + u32Src <<= 16; + } + if (!(u32Src & UINT32_C(0xff000000))) + { + iBit -= 8; + u32Src <<= 8; + } + if (!(u32Src & UINT32_C(0xf0000000))) + { + iBit -= 4; + u32Src <<= 4; + } + if (!(u32Src & UINT32_C(0xc0000000))) + { + iBit -= 2; + u32Src <<= 2; + } + if (!(u32Src & UINT32_C(0x10000000))) + { + iBit -= 1; + u32Src <<= 1; + Assert(u32Src & RT_BIT_64(63)); + } + + *puDst = iBit; + *pfEFlags &= ~X86_EFL_ZF; + } + else + *pfEFlags |= X86_EFL_ZF; +} + + +/* Unary operands. */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_inc_u64,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = uDst + 1; + *puDst = uResult; + + /* + * Calc EFLAGS. + * CF is NOT modified for hysterical raisins (allegedly for carrying and + * borrowing in arithmetic loops on intel 8008). + */ + uint32_t fEfl = *pfEFlags & ~(X86_EFL_STATUS_BITS & ~X86_EFL_CF); + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= (((uDst ^ RT_BIT_64(63)) & uResult) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_dec_u64,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = uDst - 1; + *puDst = uResult; + + /* + * Calc EFLAGS. + * CF is NOT modified for hysterical raisins (allegedly for carrying and + * borrowing in arithmetic loops on intel 8008). + */ + uint32_t fEfl = *pfEFlags & ~(X86_EFL_STATUS_BITS & ~X86_EFL_CF); + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= ((uDst & (uResult ^ RT_BIT_64(63))) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_not_u64,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + uint64_t uDst = *puDst; + uint64_t uResult = ~uDst; + *puDst = uResult; + /* EFLAGS are not modified. */ +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_neg_u64,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + uint64_t uDst = 0; + uint64_t uSrc = *puDst; + uint64_t uResult = uDst - uSrc; + *puDst = uResult; + + /* Calc EFLAGS. 
*/ + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uSrc != 0) << X86_EFL_CF_BIT; + fEfl |= g_afParity[uResult & 0xff]; + fEfl |= ((uint32_t)uResult ^ (uint32_t)uDst) & X86_EFL_AF; + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= ((uSrc & uResult) >> (64 - X86_EFL_OF_BIT)) & X86_EFL_OF; + *pfEFlags = fEfl; +} + + +/** 64-bit locked unary operand operation. */ +# define DO_LOCKED_UNARY_OP_U64(a_Mnemonic) \ + do { \ + uint64_t uOld = ASMAtomicReadU64(puDst); \ + uint64_t uTmp; \ + uint32_t fEflTmp; \ + do \ + { \ + uTmp = uOld; \ + fEflTmp = *pfEFlags; \ + iemAImpl_ ## a_Mnemonic ## _u64(&uTmp, &fEflTmp); \ + } while (ASMAtomicCmpXchgExU64(puDst, uTmp, uOld, &uOld)); \ + *pfEFlags = fEflTmp; \ + } while (0) + +IEM_DECL_IMPL_DEF(void, iemAImpl_inc_u64_locked,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + DO_LOCKED_UNARY_OP_U64(inc); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_dec_u64_locked,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + DO_LOCKED_UNARY_OP_U64(dec); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_not_u64_locked,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + DO_LOCKED_UNARY_OP_U64(not); +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_neg_u64_locked,(uint64_t *puDst, uint32_t *pfEFlags)) +{ + DO_LOCKED_UNARY_OP_U64(neg); +} + + +/* Shift and rotate. */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_rol_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst << cShift; + uResult |= uDst >> (64 - cShift); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~(X86_EFL_CF | X86_EFL_OF); + uint32_t fCarry = (uResult & 1); + fEfl |= fCarry; + fEfl |= ((uResult >> 63) ^ fCarry) << X86_EFL_OF_BIT; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_ror_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst >> cShift; + uResult |= uDst << (64 - cShift); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts (OF = OF XOR New-CF). */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~(X86_EFL_CF | X86_EFL_OF); + uint32_t fCarry = (uResult >> 63) & X86_EFL_CF; + fEfl |= fCarry; + fEfl |= (((uResult >> 62) ^ fCarry) << X86_EFL_OF_BIT) & X86_EFL_OF; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_rcl_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint32_t fEfl = *pfEFlags; + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst << cShift; + AssertCompile(X86_EFL_CF_BIT == 0); + if (cShift > 1) + uResult |= uDst >> (65 - cShift); + uResult |= (uint64_t)(fEfl & X86_EFL_CF) << (cShift - 1); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. 
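Note that every rotate and shift fallback first masks the count with 63 and returns early for zero; besides matching how the CPU treats the count for 64-bit operands, the early return avoids the undefined 64-bit shift by 64 in the wrap-around term. The value computation of rol_u64, isolated:

#include <stdint.h>

/* Value part of the 64-bit rotate-left fallback: mask the count, and skip
 * the wrap-around term entirely for a zero count (uVal >> 64 would be UB). */
static uint64_t rol64(uint64_t uVal, uint8_t cShift)
{
    cShift &= 63;
    if (!cShift)
        return uVal;
    return (uVal << cShift) | (uVal >> (64 - cShift));
}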
*/ + uint32_t fCarry = (uDst >> (64 - cShift)) & X86_EFL_CF; + fEfl &= ~(X86_EFL_CF | X86_EFL_OF); + fEfl |= fCarry; + fEfl |= ((uResult >> 63) ^ fCarry) << X86_EFL_OF_BIT; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_rcr_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint32_t fEfl = *pfEFlags; + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst >> cShift; + AssertCompile(X86_EFL_CF_BIT == 0); + if (cShift > 1) + uResult |= uDst << (65 - cShift); + uResult |= (uint64_t)(fEfl & X86_EFL_CF) << (64 - cShift); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. */ + uint32_t fCarry = (uDst >> (cShift - 1)) & X86_EFL_CF; + fEfl &= ~(X86_EFL_CF | X86_EFL_OF); + fEfl |= fCarry; + fEfl |= ((uResult >> 63) ^ fCarry) << X86_EFL_OF_BIT; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_shl_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult = uDst << cShift; + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. The AF bit is undefined, we + always set it to zero atm. */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + uint32_t fCarry = (uDst >> (64 - cShift)) & X86_EFL_CF; + fEfl |= fCarry; + fEfl |= ((uResult >> 63) ^ fCarry) << X86_EFL_OF_BIT; + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= g_afParity[uResult & 0xff]; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_shr_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult = uDst >> cShift; + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. The AF bit is undefined, we + always set it to zero atm. */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst >> (cShift - 1)) & X86_EFL_CF; + fEfl |= (uDst >> 63) << X86_EFL_OF_BIT; + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= g_afParity[uResult & 0xff]; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_sar_u64,(uint64_t *puDst, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult = (int64_t)uDst >> cShift; + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts (0). The AF bit is undefined, + we always set it to zero atm. */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst >> (cShift - 1)) & X86_EFL_CF; + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= g_afParity[uResult & 0xff]; + *pfEFlags = fEfl; + } +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_shld_u64,(uint64_t *puDst, uint64_t uSrc, uint8_t cShift, uint32_t *pfEFlags)) +{ + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst << cShift; + uResult |= uSrc >> (64 - cShift); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. The AF bit is undefined, + we always set it to zero atm. 
*/ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst >> (64 - cShift)) & X86_EFL_CF; + fEfl |= (uint32_t)((uDst >> 63) ^ (uint32_t)(uResult >> 63)) << X86_EFL_OF_BIT; + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= g_afParity[uResult & 0xff]; + *pfEFlags = fEfl; + } } -IEM_DECL_IMPL_DEF(void, iemImpl_add_u8_locked,(uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags)) + +IEM_DECL_IMPL_DEF(void, iemAImpl_shrd_u64,(uint64_t *puDst, uint64_t uSrc, uint8_t cShift, uint32_t *pfEFlags)) { - iemImpl_add_u8(pu8Dst, u8Src, pEFlags); + cShift &= 63; + if (cShift) + { + uint64_t uDst = *puDst; + uint64_t uResult; + uResult = uDst >> cShift; + uResult |= uSrc << (64 - cShift); + *puDst = uResult; + + /* Calc EFLAGS. The OF bit is undefined if cShift > 1, we implement + it the same way as for 1 bit shifts. The AF bit is undefined, + we always set it to zero atm. */ + AssertCompile(X86_EFL_CF_BIT == 0); + uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; + fEfl |= (uDst >> (cShift - 1)) & X86_EFL_CF; + fEfl |= (uint32_t)((uDst >> 63) ^ (uint32_t)(uResult >> 63)) << X86_EFL_OF_BIT; + fEfl |= X86_EFL_CALC_SF(uResult, 64); + fEfl |= X86_EFL_CALC_ZF(uResult); + fEfl |= g_afParity[uResult & 0xff]; + *pfEFlags = fEfl; + } } -#endif +/* misc */ + +IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64,(uint64_t *puMem, uint64_t *puReg)) +{ + /* XCHG implies LOCK. */ + uint64_t uOldMem = *puMem; + while (!ASMAtomicCmpXchgExU64(puMem, *puReg, uOldMem, &uOldMem)) + ASMNopPause(); + *puReg = uOldMem; +} + + +/* multiplication and division */ + +IEM_DECL_IMPL_DEF(int, iemAImpl_mul_u64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64Factor, uint32_t *pfEFlags)) +{ + AssertFailed(); + return -1; +} + + +IEM_DECL_IMPL_DEF(int, iemAImpl_imul_u64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64Factor, uint32_t *pfEFlags)) +{ + AssertFailed(); + return -1; +} + + +IEM_DECL_IMPL_DEF(void, iemAImpl_imul_two_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) +{ + AssertFailed(); +} + + + +IEM_DECL_IMPL_DEF(int, iemAImpl_div_u64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64Divisor, uint32_t *pfEFlags)) +{ + AssertFailed(); + return -1; +} + + +IEM_DECL_IMPL_DEF(int, iemAImpl_idiv_u64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64Divisor, uint32_t *pfEFlags)) +{ + AssertFailed(); + return -1; +} + + +#endif /* RT_ARCH_X86 */ + + +IEM_DECL_IMPL_DEF(void, iemAImpl_arpl,(uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pfEFlags)) +{ + if ((*pu16Dst & X86_SEL_RPL) < (u16Src & X86_SEL_RPL)) + { + *pu16Dst &= X86_SEL_MASK_OFF_RPL; + *pu16Dst |= u16Src & X86_SEL_RPL; + + *pfEFlags |= X86_EFL_ZF; + } + else + *pfEFlags &= ~X86_EFL_ZF; +} diff --git a/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h b/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h index b554dfeb..1e6ac7b6 100644 --- a/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h +++ b/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2011-2012 Oracle Corporation + * Copyright (C) 2011-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -20,6 +20,94 @@ * @{ */ + +/** + * Worker function for iemHlpCheckPortIOPermission, don't call directly. + * + * @returns Strict VBox status code. + * + * @param pIemCpu The IEM per CPU data. + * @param pCtx The register context. + * @param u16Port The port number. + * @param cbOperand The operand size. 
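The iemAImpl_arpl worker added above is small enough to restate in isolation as a sketch; the only assumption is that the RPL occupies the low two selector bits (X86_SEL_RPL == 3).

#include <stdbool.h>
#include <stdint.h>

#define SEL_RPL_MASK 0x3u

static bool arpl16(uint16_t *puDst, uint16_t uSrc, uint32_t *pfZf)
{
    if ((*puDst & SEL_RPL_MASK) < (uSrc & SEL_RPL_MASK))
    {
        *puDst = (uint16_t)((*puDst & ~SEL_RPL_MASK) | (uSrc & SEL_RPL_MASK));
        *pfZf  = 1;                    /* RPL was raised: ZF set */
        return true;
    }
    *pfZf = 0;                         /* destination already had the higher (or equal) RPL */
    return false;
}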
+ */ +static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand) +{ + /* The TSS bits we're interested in are the same on 386 and AMD64. */ + AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY); + AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL); + AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap); + AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64)); + + /* + * Check the TSS type, 16-bit TSSes doesn't have any I/O permission bitmap. + */ + Assert(!pCtx->tr.Attr.n.u1DescType); + if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY + && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL)) + { + Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n", + u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* + * Read the bitmap offset (may #PF). + */ + uint16_t offBitmap; + VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX, + pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap)); + if (rcStrict != VINF_SUCCESS) + { + Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; + } + + /* + * The bit range from u16Port to (u16Port + cbOperand - 1), however intel + * describes the CPU actually reading two bytes regardless of whether the + * bit range crosses a byte boundrary. Thus the + 1 in the test below. + */ + uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap; + /** @todo check if real CPUs ensures that offBitmap has a minimum value of + * for instance sizeof(X86TSS32). */ + if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */ + { + Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n", + offFirstBit, pCtx->tr.u32Limit)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* + * Read the necessary bits. + */ + /** @todo Test the assertion in the intel manual that the CPU reads two + * bytes. The question is how this works wrt to #PF and #GP on the + * 2nd byte when it's not required. */ + uint16_t bmBytes = UINT16_MAX; + rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit); + if (rcStrict != VINF_SUCCESS) + { + Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; + } + + /* + * Perform the check. 
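The I/O permission bitmap test being assembled above reduces to a little bit arithmetic once the two bitmap bytes have been fetched: the bitmap holds one bit per port, an access of cbOperand bytes needs that many consecutive clear bits, and a 16-bit read starting at u16Port/8 always covers the at most nine bits involved. A sketch over an in-memory copy of the bitmap (the trailing 0xff byte the TSS format calls for must be supplied by the caller):

#include <stdbool.h>
#include <stdint.h>

static bool io_access_allowed(const uint8_t *pabBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    /* the two bytes that can contain bits for this access */
    uint16_t bmBytes   = (uint16_t)(pabBitmap[u16Port / 8] | ((uint16_t)pabBitmap[u16Port / 8 + 1] << 8));
    uint16_t fPortMask = (uint16_t)((1u << cbOperand) - 1);   /* 0x1, 0x3 or 0xf for 1/2/4 byte accesses */
    bmBytes >>= (u16Port & 7);                                /* align the first port's bit to bit 0 */
    return (bmBytes & fPortMask) == 0;                        /* any set bit denies the whole access */
}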
+ */ + uint16_t fPortMask = (1 << cbOperand) - 1; + bmBytes >>= (u16Port & 7); + if (bmBytes & fPortMask) + { + Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n", + u16Port, cbOperand, bmBytes, fPortMask)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + return VINF_SUCCESS; +} + + /** * Checks if we are allowed to access the given I/O port, raising the * appropriate exceptions if we aren't (or if the I/O bitmap is not @@ -39,10 +127,7 @@ DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX if ( (pCtx->cr0 & X86_CR0_PE) && ( pIemCpu->uCpl > Efl.Bits.u2IOPL || Efl.Bits.u1VM) ) - { - NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */ - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n")); - } + return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand); return VINF_SUCCESS; } @@ -97,6 +182,9 @@ static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_ iemAImpl_test_u8(&u8Result, u8Result, &fEFlags); pCtx->eflags.u &= ~(fToUpdate | fUndefined); pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags; +#ifdef IEM_VERIFICATION_MODE_FULL + pIemCpu->fUndefinedEFlags |= fUndefined; +#endif } @@ -104,19 +192,30 @@ static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_ * Loads a NULL data selector into a selector register, both the hidden and * visible parts, in protected mode. * + * @param pIemCpu The IEM state of the calling EMT. * @param pSReg Pointer to the segment register. * @param uRpl The RPL. */ -static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl) +static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl) { /** @todo Testcase: write a testcase checking what happends when loading a NULL * data selector in protected mode. */ pSReg->Sel = uRpl; pSReg->ValidSel = uRpl; pSReg->fFlags = CPUMSELREG_FLAGS_VALID; - pSReg->u64Base = 0; - pSReg->u32Limit = 0; - pSReg->Attr.u = 0; + if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + { + /* VT-x (Intel 3960x) observed doing something like this. 
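iemHlpUpdateArithEFlagsU8 above recomputes flags with the ordinary TEST worker and then merges only the requested bits back, which keeps all other flags stable across the helper. The merge itself, as a sketch:

#include <stdint.h>

static uint32_t merge_eflags(uint32_t fEflOld, uint32_t fEflFromOp, uint32_t fToUpdate, uint32_t fUndefined)
{
    uint32_t fMask = fToUpdate | fUndefined;        /* bits the instruction defines or leaves undefined */
    return (fEflOld & ~fMask) | (fEflFromOp & fMask);
}

Everything outside fMask keeps its previous value, so callers can be selective about which status flags an instruction is allowed to disturb.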
*/ + pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT); + pSReg->u32Limit = UINT32_MAX; + pSReg->u64Base = 0; + } + else + { + pSReg->Attr.u = X86DESCATTR_UNUSABLE; + pSReg->u32Limit = 0; + pSReg->u64Base = 0; + } } @@ -139,7 +238,7 @@ static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSE && pSReg->Attr.n.u1DescType /* code or data, not system */ && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */ - iemHlpLoadNullDataSelectorProt(pSReg, 0); + iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0); } @@ -165,7 +264,7 @@ DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu) IEM_CIMPL_DEF_0(iemCImpl_popa_16) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx); + RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx); RTGCPTR GCPtrLast = GCPtrStart + 15; VBOXSTRICTRC rcStrict; @@ -190,7 +289,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_16) rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp); if (rcStrict == VINF_SUCCESS) { - iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */ + iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */ rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp); } if (rcStrict == VINF_SUCCESS) @@ -202,7 +301,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_16) if (rcStrict == VINF_SUCCESS) { pCtx->rsp = TmpRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } else @@ -222,8 +321,8 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_16) rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R); if (rcStrict == VINF_SUCCESS) { - iemRegAddToRsp(pCtx, 16); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRsp(pIemCpu, pCtx, 16); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } } @@ -237,7 +336,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_16) IEM_CIMPL_DEF_0(iemCImpl_popa_32) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx); + RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx); RTGCPTR GCPtrLast = GCPtrStart + 31; VBOXSTRICTRC rcStrict; @@ -262,7 +361,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_32) rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp); if (rcStrict == VINF_SUCCESS) { - iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */ + iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */ rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp); } if (rcStrict == VINF_SUCCESS) @@ -283,7 +382,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_32) pCtx->rax &= UINT32_MAX; #endif pCtx->rsp = TmpRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } else @@ -303,8 +402,8 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_32) rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R); if (rcStrict == VINF_SUCCESS) { - iemRegAddToRsp(pCtx, 32); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRsp(pIemCpu, pCtx, 32); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } } @@ -318,7 +417,7 @@ IEM_CIMPL_DEF_0(iemCImpl_popa_32) IEM_CIMPL_DEF_0(iemCImpl_pusha_16) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx); + RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx); RTGCPTR GCPtrBottom = GCPtrTop - 15; VBOXSTRICTRC rcStrict; @@ -354,7 +453,7 @@ IEM_CIMPL_DEF_0(iemCImpl_pusha_16) if (rcStrict == VINF_SUCCESS) { pCtx->rsp = TmpRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } else @@ -375,8 +474,8 @@ IEM_CIMPL_DEF_0(iemCImpl_pusha_16) rcStrict = 
iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W); if (rcStrict == VINF_SUCCESS) { - iemRegSubFromRsp(pCtx, 16); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegSubFromRsp(pIemCpu, pCtx, 16); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } } @@ -390,7 +489,7 @@ IEM_CIMPL_DEF_0(iemCImpl_pusha_16) IEM_CIMPL_DEF_0(iemCImpl_pusha_32) { PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx); + RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx); RTGCPTR GCPtrBottom = GCPtrTop - 31; VBOXSTRICTRC rcStrict; @@ -426,7 +525,7 @@ IEM_CIMPL_DEF_0(iemCImpl_pusha_32) if (rcStrict == VINF_SUCCESS) { pCtx->rsp = TmpRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } else @@ -447,8 +546,8 @@ IEM_CIMPL_DEF_0(iemCImpl_pusha_32) rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W); if (rcStrict == VINF_SUCCESS) { - iemRegSubFromRsp(pCtx, 32); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegSubFromRsp(pIemCpu, pCtx, 32); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } } } @@ -505,7 +604,7 @@ IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize) if (rcStrict != VINF_SUCCESS) return rcStrict; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -604,11 +703,19 @@ IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize) break; } case IEMMODE_32BIT: - case IEMMODE_64BIT: rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew); if (rcStrict != VINF_SUCCESS) return rcStrict; break; + case IEMMODE_64BIT: + { + uint64_t u64Value; + rcStrict = iemMemStackPopU64(pIemCpu, &u64Value); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */ + break; + } IEM_NOT_REACHED_DEFAULT_CASE_RET(); } @@ -636,7 +743,7 @@ IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize) */ Assert(fEflNew & RT_BIT_32(1)); IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -661,8 +768,8 @@ IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC) return rcStrict; pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; - } @@ -684,6 +791,7 @@ IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp) return rcStrict; pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -706,9 +814,26 @@ IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC) if (rcStrict != VINF_SUCCESS) return rcStrict; +#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD) + /* + * CASM hook for recording interesting indirect calls. 
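The popa/pusha workers above rely on the fixed PUSHA frame layout: AX, CX, DX, BX, the original SP, BP, SI and DI are stored from the highest address downwards, and POPA reads them back in the opposite order while discarding the saved SP. A sketch with a flat view of the 16-byte frame; the struct and pointer conventions are illustrative only.

#include <stdint.h>

typedef struct { uint16_t ax, cx, dx, bx, sp, bp, si, di; } regs16_t;

/* pau16Frame points at the 16 bytes just below the pre-PUSHA stack pointer. */
static void pusha16(regs16_t *pRegs, uint16_t *pau16Frame)
{
    uint16_t const uOrigSp = pRegs->sp;
    pau16Frame[7] = pRegs->ax;   pau16Frame[6] = pRegs->cx;
    pau16Frame[5] = pRegs->dx;   pau16Frame[4] = pRegs->bx;
    pau16Frame[3] = uOrigSp;     pau16Frame[2] = pRegs->bp;
    pau16Frame[1] = pRegs->si;   pau16Frame[0] = pRegs->di;
    pRegs->sp = (uint16_t)(uOrigSp - 16);
}

/* pau16Frame points at the current stack pointer, i.e. the bottom of the frame. */
static void popa16(regs16_t *pRegs, const uint16_t *pau16Frame)
{
    pRegs->di = pau16Frame[0];   pRegs->si = pau16Frame[1];
    pRegs->bp = pau16Frame[2];   /* pau16Frame[3] holds the saved SP and is skipped */
    pRegs->bx = pau16Frame[4];   pRegs->dx = pau16Frame[5];
    pRegs->cx = pau16Frame[6];   pRegs->ax = pau16Frame[7];
    pRegs->sp = (uint16_t)(pRegs->sp + 16);
}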
+ */ + if ( !pCtx->eflags.Bits.u1IF + && (pCtx->cr0 & X86_CR0_PG) + && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu)) + && pIemCpu->uCpl == 0) + { + EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu)); + if ( enmState == EMSTATE_IEM_THEN_REM + || enmState == EMSTATE_IEM + || enmState == EMSTATE_REM) + CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip); + } +#endif + pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; - } @@ -730,6 +855,7 @@ IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp) return rcStrict; pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -753,8 +879,8 @@ IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC) return rcStrict; pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; - } @@ -776,6 +902,7 @@ IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp) return rcStrict; pCtx->rip = uNewPC; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -791,7 +918,7 @@ IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp) */ IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc) { - /* Call various functions to do the work. */ + /* Call various functions to do the work. Clear RF? */ IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); } @@ -807,7 +934,7 @@ IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch */ IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc) { - /* Call various functions to do the work. */ + /* Call various functions to do the work. Don't clear RF */ IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); } @@ -823,7 +950,7 @@ IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, I */ IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc) { - /* Call various functions to do the work. */ + /* Call various functions to do the work. Clear RF. */ IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); } @@ -923,6 +1050,7 @@ IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmE pCtx->cs.ValidSel = uSel; pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; pCtx->cs.u64Base = (uint32_t)uSel << 4; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -937,7 +1065,7 @@ IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmE /* Fetch the descriptor. */ IEMSELDESC Desc; - VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel); + VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1037,6 +1165,7 @@ IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmE pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); pCtx->cs.u32Limit = cbLimit; pCtx->cs.u64Base = u64Base; + pCtx->eflags.Bits.u1RF = 0; /** @todo check if the hidden bits are loaded correctly for 64-bit * mode. */ return VINF_SUCCESS; @@ -1100,6 +1229,7 @@ IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEf pCtx->cs.ValidSel = uSel; pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; pCtx->cs.u64Base = (uint32_t)uSel << 4; + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -1114,7 +1244,7 @@ IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEf /* Fetch the descriptor. 
*/ IEMSELDESC Desc; - rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel); + rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1252,6 +1382,7 @@ IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEf pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); pCtx->cs.u32Limit = cbLimit; pCtx->cs.u64Base = u64Base; + pCtx->eflags.Bits.u1RF = 0; /** @todo check if the hidden bits are loaded correctly for 64-bit * mode. */ return VINF_SUCCESS; @@ -1323,9 +1454,10 @@ IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) pCtx->cs.ValidSel = uNewCs; pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; pCtx->cs.u64Base = (uint32_t)uNewCs << 4; + pCtx->eflags.Bits.u1RF = 0; /** @todo do we load attribs and limit as well? */ if (cbPop) - iemRegAddToRsp(pCtx, cbPop); + iemRegAddToRsp(pIemCpu, pCtx, cbPop); return VINF_SUCCESS; } @@ -1340,7 +1472,7 @@ IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) /* Fetch the descriptor. */ IEMSELDESC DescCs; - rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs); + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1441,7 +1573,7 @@ IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) else { /* Fetch the descriptor for the new stack segment. */ - rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs); + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP); if (rcStrict != VINF_SUCCESS) return rcStrict; } @@ -1574,7 +1706,8 @@ IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) * mode. */ if (cbPop) - iemRegAddToRsp(pCtx, cbPop); + iemRegAddToRsp(pIemCpu, pCtx, cbPop); + pCtx->eflags.Bits.u1RF = 0; /* Done! */ } @@ -1638,7 +1771,8 @@ IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) /** @todo check if the hidden bits are loaded correctly for 64-bit * mode. */ if (cbPop) - iemRegAddToRsp(pCtx, cbPop); + iemRegAddToRsp(pIemCpu, pCtx, cbPop); + pCtx->eflags.Bits.u1RF = 0; } return VINF_SUCCESS; } @@ -1706,7 +1840,8 @@ IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop) pCtx->rip = NewRip.u; pCtx->rsp = NewRsp.u; if (cbPop) - iemRegAddToRsp(pCtx, cbPop); + iemRegAddToRsp(pIemCpu, pCtx, cbPop); + pCtx->eflags.Bits.u1RF = 0; return VINF_SUCCESS; } @@ -1814,14 +1949,14 @@ IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_ } /* Recalc RSP. */ - iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx); + iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame); /** @todo Should probe write access at the new RSP according to AMD. */ /* Commit it. */ pCtx->rbp = NewRbp.u; pCtx->rsp = NewRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -1842,7 +1977,7 @@ IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize) /* Calculate the intermediate RSP from RBP and the stack attributes. */ RTUINT64U NewRsp; - if (pCtx->ss.Attr.n.u1Long) + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) NewRsp.u = pCtx->rbp; else if (pCtx->ss.Attr.n.u1DefBig) NewRsp.u = pCtx->ebp; @@ -1878,7 +2013,7 @@ IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize) /* Commit it. 
*/ pCtx->rbp = NewRbp.u; pCtx->rsp = NewRsp.u; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -1918,7 +2053,8 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize) /* * iret throws an exception if VME isn't enabled. */ - if ( pCtx->eflags.Bits.u1VM + if ( Efl.Bits.u1VM + && Efl.Bits.u2IOPL != 3 && !(pCtx->cr4 & X86_CR4_VME)) return iemRaiseGeneralProtectionFault0(pIemCpu); @@ -1939,6 +2075,9 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize) if (rcStrict != VINF_SUCCESS) return rcStrict; uNewEip = uFrame.pu32[0]; + if (uNewEip > UINT16_MAX) + return iemRaiseGeneralProtectionFault0(pIemCpu); + uNewCs = (uint16_t)uFrame.pu32[1]; uNewFlags = uFrame.pu32[2]; uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF @@ -2003,6 +2142,11 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize) rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp); if (rcStrict != VINF_SUCCESS) return rcStrict; +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx", + pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp); +#endif + pCtx->rip = uNewEip; pCtx->cs.Sel = uNewCs; pCtx->cs.ValidSel = uNewCs; @@ -2043,19 +2187,12 @@ static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg) * @param uNewCs The new CS. * @param uNewFlags The new EFLAGS. * @param uNewRsp The RSP after the initial IRET frame. + * + * @note This can only be a 32-bit iret du to the X86_EFL_VM position. */ IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp) { -#if 0 - if (!LogIs6Enabled()) - { - RTLogGroupSettings(NULL, "iem.eo.l6.l2"); - RTLogFlags(NULL, "enabled"); - return VERR_IEM_RESTART_INSTRUCTION; - } -#endif - /* * Pop the V8086 specific frame bits off the stack. */ @@ -2077,6 +2214,14 @@ IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uin /* * Commit the operation. */ + uNewFlags &= X86_EFL_LIVE_MASK; + uNewFlags |= X86_EFL_RA1_MASK; +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x", + pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp); +#endif + + IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags); iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs); iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs); iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs); @@ -2085,7 +2230,6 @@ IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uin iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs); pCtx->rip = uNewEip; pCtx->rsp = uNewEsp; - pCtx->rflags.u = uNewFlags; pIemCpu->uCpl = 3; return VINF_SUCCESS; @@ -2175,7 +2319,7 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) } IEMSELDESC DescCS; - rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs); + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP); if (rcStrict != VINF_SUCCESS) { Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict))); @@ -2194,6 +2338,36 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); } +#ifdef VBOX_WITH_RAW_MODE_NOT_R0 + /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. 
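The reworked precondition in iemCImpl_iret_real_v8086 above only faults when the CPU really is in V8086 mode with IOPL below 3 and CR4.VME clear. Expressed as a standalone predicate (a sketch, not the VirtualBox helper):

#include <stdbool.h>

static bool v86_iret_allowed(bool fVm, unsigned uIopl, bool fCr4Vme)
{
    if (!fVm)
        return true;      /* plain real mode: no IOPL restriction here */
    if (uIopl == 3)
        return true;      /* IOPL 3: IRET is handled (almost) like real mode */
    return fCr4Vme;       /* otherwise only the VME-assisted path avoids #GP(0) */
}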
*/ + PVM pVM = IEMCPU_TO_VM(pIemCpu); + if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM)) + { + if ((uNewCs & X86_SEL_RPL) == 1) + { + if ( pIemCpu->uCpl == 0 + && ( !EMIsRawRing1Enabled(pVM) + || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) ) + { + Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL)); + uNewCs &= X86_SEL_MASK_OFF_RPL; + } +# ifdef LOG_ENABLED + else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM)) + Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs)); +# endif + } + else if ( (uNewCs & X86_SEL_RPL) == 2 + && EMIsRawRing1Enabled(pVM) + && pIemCpu->uCpl <= 1) + { + Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1)); + uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2; + } + } +#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ + + /* Privilege checks. */ if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl) { @@ -2251,7 +2425,7 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) } IEMSELDESC DescSS; - rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS); + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */ if (rcStrict != VINF_SUCCESS) { Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n", @@ -2322,6 +2496,24 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; } + uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF + | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; + if (enmEffOpSize != IEMMODE_16BIT) + fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; + if (pIemCpu->uCpl == 0) + fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */ + else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL) + fEFlagsMask |= X86_EFL_IF; + uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx); + fEFlagsNew &= ~fEFlagsMask; + fEFlagsNew |= uNewFlags & fEFlagsMask; +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x", + pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip, + uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP); +#endif + + IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew); pCtx->rip = uNewEip; pCtx->cs.Sel = uNewCs; pCtx->cs.ValidSel = uNewCs; @@ -2329,7 +2521,10 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); pCtx->cs.u32Limit = cbLimitCS; pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); - pCtx->rsp = uNewESP; + if (!pCtx->cs.Attr.n.u1DefBig) + pCtx->sp = (uint16_t)uNewESP; + else + pCtx->rsp = uNewESP; pCtx->ss.Sel = uNewSS; pCtx->ss.ValidSel = uNewSS; pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; @@ -2337,19 +2532,6 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) pCtx->ss.u32Limit = cbLimitSs; pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); - uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF - | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; - if (enmEffOpSize != IEMMODE_16BIT) - fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; - if (pIemCpu->uCpl == 0) - fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */ - else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL) - fEFlagsMask |= X86_EFL_IF; - uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx); - fEFlagsNew &= ~fEFlagsMask; - fEFlagsNew |= uNewFlags & fEFlagsMask; - IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew); - 
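The EFLAGS mask computed in the IRET path above decides which bits the popped flags may actually overwrite: the arithmetic flags plus TF/DF/NT always, RF/AC/ID only for 32-bit and wider operand sizes, IF only when CPL <= IOPL, and IOPL/VIF/VIP only at CPL 0. A standalone sketch of that merge, using plain EFLAGS bit positions rather than the X86_EFL_* constants:

#include <stdbool.h>
#include <stdint.h>

#define EFL_CF   (1u << 0)
#define EFL_PF   (1u << 2)
#define EFL_AF   (1u << 4)
#define EFL_ZF   (1u << 6)
#define EFL_SF   (1u << 7)
#define EFL_TF   (1u << 8)
#define EFL_IF   (1u << 9)
#define EFL_DF   (1u << 10)
#define EFL_OF   (1u << 11)
#define EFL_IOPL (3u << 12)
#define EFL_NT   (1u << 14)
#define EFL_RF   (1u << 16)
#define EFL_AC   (1u << 18)
#define EFL_VIF  (1u << 19)
#define EFL_VIP  (1u << 20)
#define EFL_ID   (1u << 21)

static uint32_t iret_merge_eflags(uint32_t fOld, uint32_t fPopped, unsigned uCpl, bool f16BitOpSize)
{
    uint32_t fMask = EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF
                   | EFL_TF | EFL_DF | EFL_OF | EFL_NT;
    if (!f16BitOpSize)
        fMask |= EFL_RF | EFL_AC | EFL_ID;

    unsigned const uIopl = (fOld >> 12) & 3;    /* the current IOPL decides, not the popped one */
    if (uCpl == 0)
        fMask |= EFL_IF | EFL_IOPL | EFL_VIF | EFL_VIP;
    else if (uCpl <= uIopl)
        fMask |= EFL_IF;

    return (fOld & ~fMask) | (fPopped & fMask);
}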
pIemCpu->uCpl = uNewCs & X86_SEL_RPL; iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds); iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es); @@ -2382,18 +2564,9 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; } - pCtx->rip = uNewEip; - pCtx->cs.Sel = uNewCs; - pCtx->cs.ValidSel = uNewCs; - pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; - pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); - pCtx->cs.u32Limit = cbLimitCS; - pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); - pCtx->rsp = uNewRsp; - X86EFLAGS NewEfl; NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx); - uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF + uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; if (enmEffOpSize != IEMMODE_16BIT) fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; @@ -2403,7 +2576,21 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) fEFlagsMask |= X86_EFL_IF; NewEfl.u &= ~fEFlagsMask; NewEfl.u |= fEFlagsMask & uNewFlags; +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx", + pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip, + uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp); +#endif + IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u); + pCtx->rip = uNewEip; + pCtx->cs.Sel = uNewCs; + pCtx->cs.ValidSel = uNewCs; + pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); + pCtx->cs.u32Limit = cbLimitCS; + pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); + pCtx->rsp = uNewRsp; /* Done! */ } return VINF_SUCCESS; @@ -2417,12 +2604,286 @@ IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) */ IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize) { - //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); - //VBOXSTRICTRC rcStrict; - //uint64_t uNewRsp; + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + NOREF(cbInstr); - NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize); - IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); + /* + * Nested task return is not supported in long mode. + */ + if (pCtx->eflags.Bits.u1NT) + { + Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* + * Normal return. + * + * Do the stack bits, but don't commit RSP before everything checks + * out right. 
+ */ + VBOXSTRICTRC rcStrict; + RTCPTRUNION uFrame; + uint64_t uNewRip; + uint16_t uNewCs; + uint16_t uNewSs; + uint32_t uNewFlags; + uint64_t uNewRsp; + if (enmEffOpSize == IEMMODE_64BIT) + { + rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + uNewRip = uFrame.pu64[0]; + uNewCs = (uint16_t)uFrame.pu64[1]; + uNewFlags = (uint32_t)uFrame.pu64[2]; + uNewRsp = uFrame.pu64[3]; + uNewSs = (uint16_t)uFrame.pu64[4]; + } + else if (enmEffOpSize == IEMMODE_32BIT) + { + rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + uNewRip = uFrame.pu32[0]; + uNewCs = (uint16_t)uFrame.pu32[1]; + uNewFlags = uFrame.pu32[2]; + uNewRsp = uFrame.pu32[3]; + uNewSs = (uint16_t)uFrame.pu32[4]; + } + else + { + Assert(enmEffOpSize == IEMMODE_16BIT); + rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + uNewRip = uFrame.pu16[0]; + uNewCs = uFrame.pu16[1]; + uNewFlags = uFrame.pu16[2]; + uNewRsp = uFrame.pu16[3]; + uNewSs = uFrame.pu16[4]; + } + rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */ + if (rcStrict != VINF_SUCCESS) + return rcStrict; + Log2(("iretq stack: cs:rip=%04x:%016RX16 rflags=%016RX16 ss:rsp=%04x:%016RX16\n", + uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp)); + + /* + * Check stuff. + */ + /* Read the CS descriptor. */ + if (!(uNewCs & X86_SEL_MASK_OFF_RPL)) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + IEMSELDESC DescCS; + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP); + if (rcStrict != VINF_SUCCESS) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n", + uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; + } + + /* Must be a code descriptor. */ + if ( !DescCS.Legacy.Gen.u1DescType + || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n", + uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); + } + + /* Privilege checks. */ + uint8_t const uNewCpl = uNewCs & X86_SEL_RPL; + if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); + } + if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) + && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n", + uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); + } + + /* Present? */ + if (!DescCS.Legacy.Gen.u1Present) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs); + } + + uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy); + + /* Read the SS descriptor. 
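The 64-bit IRET path above pops a five-slot frame; with a 64-bit operand size each slot is eight bytes (four or two bytes for the 32- and 16-bit frames, same order). A sketch of the 64-bit layout as read from the stack:

#include <stdint.h>

typedef struct
{
    uint64_t uRip;      /* [RSP +  0] return RIP                           */
    uint64_t uCs;       /* [RSP +  8] return CS, only the low 16 bits used */
    uint64_t uRFlags;   /* [RSP + 16] return RFLAGS                        */
    uint64_t uRsp;      /* [RSP + 24] return RSP                           */
    uint64_t uSs;       /* [RSP + 32] return SS, only the low 16 bits used */
} iretq_frame_t;

static void decode_iretq_frame(const uint64_t *pau64Stack, iretq_frame_t *pFrame)
{
    pFrame->uRip    = pau64Stack[0];
    pFrame->uCs     = pau64Stack[1];
    pFrame->uRFlags = pau64Stack[2];
    pFrame->uRsp    = pau64Stack[3];
    pFrame->uSs     = pau64Stack[4];
}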
*/ + IEMSELDESC DescSS; + if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) + { + if ( !DescCS.Legacy.Gen.u1Long + || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */ + || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */ + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + DescSS.Legacy.u = 0; + } + else + { + rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */ + if (rcStrict != VINF_SUCCESS) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n", + uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict))); + return rcStrict; + } + } + + /* Privilege checks. */ + if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL)) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); + } + + uint32_t cbLimitSs; + if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) + cbLimitSs = UINT32_MAX; + else + { + if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL)) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n", + uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); + } + + /* Must be a writeable data segment descriptor. */ + if (!DescSS.Legacy.Gen.u1DescType) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n", + uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); + } + if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n", + uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); + return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); + } + + /* Present? */ + if (!DescSS.Legacy.Gen.u1Present) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs); + } + cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy); + } + + /* Check EIP. */ + if (DescCS.Legacy.Gen.u1Long) + { + if (!IEM_IS_CANONICAL(uNewRip)) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n", + uNewCs, uNewRip, uNewSs, uNewRsp)); + return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs); + } + } + else + { + if (uNewRip > cbLimitCS) + { + Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n", + uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS)); + return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs); + } + } + + /* + * Commit the changes, marking CS and SS accessed first since + * that may fail. + */ + /** @todo where exactly are these actually marked accessed by a real CPU? 
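The IEM_IS_CANONICAL check applied to the new RIP above comes down to requiring bits 63:47 to be a sign extension of bit 47 on the current 48-bit implementations; a minimal sketch:

#include <stdbool.h>
#include <stdint.h>

static bool is_canonical(uint64_t uAddr)
{
    uint64_t const uTop = uAddr >> 47;     /* the 17 bits 63:47 */
    return uTop == 0 || uTop == 0x1FFFF;   /* all clear or all set */
}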
*/ + if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) + { + rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; + } + if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) + { + rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; + } + + uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF + | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; + if (enmEffOpSize != IEMMODE_16BIT) + fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; + if (pIemCpu->uCpl == 0) + fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */ + else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL) + fEFlagsMask |= X86_EFL_IF; + uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx); + fEFlagsNew &= ~fEFlagsMask; + fEFlagsNew |= uNewFlags & fEFlagsMask; +#ifdef DBGFTRACE_ENABLED + RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx", + pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp); +#endif + + IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew); + pCtx->rip = uNewRip; + pCtx->cs.Sel = uNewCs; + pCtx->cs.ValidSel = uNewCs; + pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); + pCtx->cs.u32Limit = cbLimitCS; + pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); + if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig) + pCtx->rsp = uNewRsp; + else + pCtx->sp = (uint16_t)uNewRsp; + pCtx->ss.Sel = uNewSs; + pCtx->ss.ValidSel = uNewSs; + if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) + { + pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT); + pCtx->ss.u32Limit = UINT32_MAX; + pCtx->ss.u64Base = 0; + Log2(("iretq new SS: NULL\n")); + } + else + { + pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; + pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); + pCtx->ss.u32Limit = cbLimitSs; + pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); + Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u)); + } + + if (pIemCpu->uCpl != uNewCpl) + { + pIemCpu->uCpl = uNewCpl; + iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds); + iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es); + iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs); + iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs); + } + + return VINF_SUCCESS; } @@ -2436,8 +2897,7 @@ IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize) /* * Call a mode specific worker. */ - if ( pIemCpu->enmCpuMode == IEMMODE_16BIT - && IEM_IS_REAL_OR_V86_MODE(pIemCpu)) + if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize); if (IEM_IS_LONG_MODE(pIemCpu)) return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize); @@ -2447,6 +2907,207 @@ IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize) /** + * Implements SYSCALL (AMD and Intel64). + * + * @param enmEffOpSize The effective operand size. + */ +IEM_CIMPL_DEF_0(iemCImpl_syscall) +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + + /* + * Check preconditions. + * + * Note that CPUs described in the documentation may load a few odd values + * into CS and SS than we allow here. This has yet to be checked on real + * hardware. 
+ */ + if (!(pCtx->msrEFER & MSR_K6_EFER_SCE)) + { + Log(("syscall: Not enabled in EFER -> #UD\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + if (!(pCtx->cr0 & X86_CR0_PE)) + { + Log(("syscall: Protected mode is required -> #GP(0)\n")); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx)) + { + Log(("syscall: Only available in long mode on intel -> #UD\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + + /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */ + /** @todo what about LDT selectors? Shouldn't matter, really. */ + uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL; + uint16_t uNewSs = uNewCs + 8; + if (uNewCs == 0 || uNewSs == 0) + { + Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n")); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* Long mode and legacy mode differs. */ + if (CPUMIsGuestInLongModeEx(pCtx)) + { + uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx-> msrCSTAR; + + /* This test isn't in the docs, but I'm not trusting the guys writing + the MSRs to have validated the values as canonical like they should. */ + if (!IEM_IS_CANONICAL(uNewRip)) + { + Log(("syscall: Only available in long mode on intel -> #UD\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + + /* + * Commit it. + */ + Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip)); + pCtx->rcx = pCtx->rip + cbInstr; + pCtx->rip = uNewRip; + + pCtx->rflags.u &= ~X86_EFL_RF; + pCtx->r11 = pCtx->rflags.u; + pCtx->rflags.u &= ~pCtx->msrSFMASK; + pCtx->rflags.u |= X86_EFL_1; + + pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC; + pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC; + } + else + { + /* + * Commit it. + */ + Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", + pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK))); + pCtx->rcx = pCtx->eip + cbInstr; + pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK; + pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF); + + pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC; + pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC; + } + pCtx->cs.Sel = uNewCs; + pCtx->cs.ValidSel = uNewCs; + pCtx->cs.u64Base = 0; + pCtx->cs.u32Limit = UINT32_MAX; + pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; + + pCtx->ss.Sel = uNewSs; + pCtx->ss.ValidSel = uNewSs; + pCtx->ss.u64Base = 0; + pCtx->ss.u32Limit = UINT32_MAX; + pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; + + return VINF_SUCCESS; +} + + +/** + * Implements SYSRET (AMD and Intel64). + */ +IEM_CIMPL_DEF_0(iemCImpl_sysret) + +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + + /* + * Check preconditions. + * + * Note that CPUs described in the documentation may load a few odd values + * into CS and SS than we allow here. This has yet to be checked on real + * hardware. 
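For the long-mode path above, SYSCALL derives everything from the MSRs: CS from STAR[47:32] with the RPL forced to 0, SS as CS + 8, RIP from LSTAR (or CSTAR when invoked from compatibility mode), while RCX and R11 receive the return RIP and the RF-cleared RFLAGS, and RFLAGS is then masked with SFMASK. A sketch of that commit sequence with a simplified context structure; the field names are illustrative.

#include <stdint.h>

typedef struct
{
    uint64_t msrStar, msrLStar, msrSFMask;
    uint64_t rip, rcx, r11, rflags;
    uint16_t cs, ss;
} syscall_ctx_t;

static void do_syscall64(syscall_ctx_t *p, uint8_t cbInstr)
{
    p->rcx    = p->rip + cbInstr;                          /* return address */
    p->r11    = p->rflags & ~(1ull << 16);                 /* saved RFLAGS with RF cleared */
    p->rip    = p->msrLStar;                               /* 64-bit entry point */
    p->rflags = (p->r11 & ~p->msrSFMask) | 2;              /* apply SFMASK, reserved bit 1 stays set */
    p->cs     = (uint16_t)((p->msrStar >> 32) & 0xFFFC);   /* STAR syscall CS, RPL stripped */
    p->ss     = (uint16_t)(p->cs + 8);                     /* SS is defined as CS + 8 */
}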
+ */ + if (!(pCtx->msrEFER & MSR_K6_EFER_SCE)) + { + Log(("sysret: Not enabled in EFER -> #UD\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx)) + { + Log(("sysret: Only available in long mode on intel -> #UD\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + if (!(pCtx->cr0 & X86_CR0_PE)) + { + Log(("sysret: Protected mode is required -> #GP(0)\n")); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + if (pIemCpu->uCpl != 0) + { + Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */ + uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL; + uint16_t uNewSs = uNewCs + 8; + if (pIemCpu->enmEffOpSize == IEMMODE_64BIT) + uNewCs += 16; + if (uNewCs == 0 || uNewSs == 0) + { + Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n")); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + /* + * Commit it. + */ + if (CPUMIsGuestInLongModeEx(pCtx)) + { + if (pIemCpu->enmEffOpSize == IEMMODE_64BIT) + { + Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", + pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11)); + /* Note! We disregard intel manual regarding the RCX cananonical + check, ask intel+xen why AMD doesn't do it. */ + pCtx->rip = pCtx->rcx; + pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC + | (3 << X86DESCATTR_DPL_SHIFT); + } + else + { + Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", + pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11)); + pCtx->rip = pCtx->ecx; + pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC + | (3 << X86DESCATTR_DPL_SHIFT); + } + /** @todo testcase: See what kind of flags we can make SYSRET restore and + * what it really ignores. RF and VM are hinted at being zero, by AMD. */ + pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP); + pCtx->rflags.u |= X86_EFL_1; + } + else + { + Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx)); + pCtx->rip = pCtx->rcx; + pCtx->rflags.u |= X86_EFL_IF; + pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC + | (3 << X86DESCATTR_DPL_SHIFT); + } + pCtx->cs.Sel = uNewCs | 3; + pCtx->cs.ValidSel = uNewCs | 3; + pCtx->cs.u64Base = 0; + pCtx->cs.u32Limit = UINT32_MAX; + pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; + + pCtx->ss.Sel = uNewSs | 3; + pCtx->ss.ValidSel = uNewSs | 3; + pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; + /* The SS hidden bits remains unchanged says AMD. To that I say "Yeah, right!". */ + pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT); + /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged + * on sysret. */ + + return VINF_SUCCESS; +} + + +/** * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'. * * @param iSegReg The segment register number (valid). 
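The SYSRET path above derives its selectors from STAR[63:48]: the 64-bit return adds 16 to the CS base while SS stays at base + 8, and both selectors get RPL 3 ORed in before being committed. A sketch of just the selector arithmetic:

#include <stdbool.h>
#include <stdint.h>

static void sysret_selectors(uint64_t msrStar, bool f64BitOp, uint16_t *puCs, uint16_t *puSs)
{
    uint16_t const uBase = (uint16_t)((msrStar >> 48) & 0xFFFC);   /* STAR sysret field, RPL stripped */
    uint16_t const uCs   = (uint16_t)(uBase + (f64BitOp ? 16 : 0));
    uint16_t const uSs   = (uint16_t)(uBase + 8);                  /* SS does not move with the operand size */
    *puCs = (uint16_t)(uCs | 3);                                   /* the return lands in ring 3 */
    *puSs = (uint16_t)(uSs | 3);
}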
@@ -2483,7 +3144,7 @@ IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel) : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE; #endif CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -2495,55 +3156,38 @@ IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel) */ if (!(uSel & X86_SEL_MASK_OFF_RPL)) { + Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */ if (iSegReg == X86_SREG_SS) { + /* In 64-bit kernel mode, the stack can be 0 because of the way + interrupts are dispatched. AMD seems to have a slighly more + relaxed relationship to SS.RPL than intel does. */ + /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */ if ( pIemCpu->enmCpuMode != IEMMODE_64BIT - || pIemCpu->uCpl != 0 - || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */ + || pIemCpu->uCpl > 2 + || ( uSel != pIemCpu->uCpl + && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) ) { - Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel)); + Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel)); return iemRaiseGeneralProtectionFault0(pIemCpu); } - - /* In 64-bit kernel mode, the stack can be 0 because of the way - interrupts are dispatched when in kernel ctx. Just load the - selector value into the register and leave the hidden bits - as is. */ - *pSel = uSel; - pHid->ValidSel = uSel; - iemRegAddToRip(pIemCpu, cbInstr); - return VINF_SUCCESS; } *pSel = uSel; /* Not RPL, remember :-) */ - if ( pIemCpu->enmCpuMode == IEMMODE_64BIT - && iSegReg != X86_SREG_FS - && iSegReg != X86_SREG_GS) - { - /** @todo figure out what this actually does, it works. Needs - * testcase! */ - pHid->Attr.u = 0; - pHid->Attr.n.u1Present = 1; - pHid->Attr.n.u1Long = 1; - pHid->Attr.n.u4Type = X86_SEL_TYPE_RW; - pHid->Attr.n.u2Dpl = 3; - pHid->u32Limit = 0; - pHid->u64Base = 0; - pHid->ValidSel = uSel; - pHid->fFlags = CPUMSELREG_FLAGS_VALID; - } - else - iemHlpLoadNullDataSelectorProt(pHid, uSel); + iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel); + if (iSegReg == X86_SREG_SS) + pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT; + Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid)); CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } /* Fetch the descriptor. */ IEMSELDESC Desc; - VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel); + VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */ if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -2648,7 +3292,7 @@ IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel) Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid)); CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -2775,10 +3419,243 @@ IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, /** + * Helper for VERR, VERW, LAR, and LSL and loads the descriptor into memory. + * + * @retval VINF_SUCCESS on success. + * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok. + * @retval iemMemFetchSysU64 return value. + * + * @param pIemCpu The IEM state of the calling EMT. + * @param uSel The selector value. 
+ * @param fAllowSysDesc Whether system descriptors are OK or not. + * @param pDesc Where to return the descriptor on success. + */ +static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc) +{ + pDesc->Long.au64[0] = 0; + pDesc->Long.au64[1] = 0; + + if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */ + return VINF_IEM_SELECTOR_NOT_OK; + + /* Within the table limits? */ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + RTGCPTR GCPtrBase; + if (uSel & X86_SEL_LDT) + { + if ( !pCtx->ldtr.Attr.n.u1Present + || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit ) + return VINF_IEM_SELECTOR_NOT_OK; + GCPtrBase = pCtx->ldtr.u64Base; + } + else + { + if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt) + return VINF_IEM_SELECTOR_NOT_OK; + GCPtrBase = pCtx->gdtr.pGdt; + } + + /* Fetch the descriptor. */ + VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK)); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + if (!pDesc->Legacy.Gen.u1DescType) + { + if (!fAllowSysDesc) + return VINF_IEM_SELECTOR_NOT_OK; + if (CPUMIsGuestInLongModeEx(pCtx)) + { + rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } + + } + + return VINF_SUCCESS; +} + + +/** + * Implements verr (fWrite = false) and verw (fWrite = true). + */ +IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite) +{ + Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu)); + + /** @todo figure whether the accessed bit is set or not. */ + + bool fAccessible = true; + IEMSELDESC Desc; + VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc); + if (rcStrict == VINF_SUCCESS) + { + /* Check the descriptor, order doesn't matter much here. */ + if ( !Desc.Legacy.Gen.u1DescType + || !Desc.Legacy.Gen.u1Present) + fAccessible = false; + else + { + if ( fWrite + ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE + : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) + fAccessible = false; + + /** @todo testcase for the conforming behavior. */ + if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) + != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) + { + if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl) + fAccessible = false; + else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl) + fAccessible = false; + } + } + + } + else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK) + fAccessible = false; + else + return rcStrict; + + /* commit */ + pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible; + + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return VINF_SUCCESS; +} + + +/** + * Implements LAR and LSL with 64-bit operand size. + * + * @returns VINF_SUCCESS. + * @param pu16Dst Pointer to the destination register. + * @param uSel The selector to load details for. + * @param pEFlags Pointer to the eflags register. + * @param fIsLar true = LAR, false = LSL. + */ +IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar) +{ + Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu)); + + /** @todo figure whether the accessed bit is set or not. */ + + bool fDescOk = true; + IEMSELDESC Desc; + VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc); + if (rcStrict == VINF_SUCCESS) + { + /* + * Check the descriptor type. 
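iemCImpl_LoadDescHelper, whose body appears just above, performs the standard descriptor table walk: bit 2 of the selector picks the LDT or GDT, the selector with RPL and TI stripped is the byte offset of the 8-byte descriptor, and that offset plus 7 must lie within the (inclusive) table limit. A sketch of the address computation with simplified table descriptors:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t uBase; uint32_t cbLimit; } desc_table_t;

static bool descriptor_addr(const desc_table_t *pGdt, const desc_table_t *pLdt,
                            uint16_t uSel, uint64_t *pGCPtrDesc)
{
    const desc_table_t *pTable = (uSel & 0x4) ? pLdt : pGdt;   /* TI bit selects the table */
    uint32_t const offDesc = uSel & 0xFFF8u;                   /* strip RPL and TI */
    if (offDesc + 7 > pTable->cbLimit)                         /* the whole descriptor must fit */
        return false;
    *pGCPtrDesc = pTable->uBase + offDesc;
    return true;
}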
+ */ + if (!Desc.Legacy.Gen.u1DescType) + { + if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx))) + { + if (Desc.Long.Gen.u5Zeros) + fDescOk = false; + else + switch (Desc.Long.Gen.u4Type) + { + /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */ + case AMD64_SEL_TYPE_SYS_TSS_AVAIL: + case AMD64_SEL_TYPE_SYS_TSS_BUSY: + case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */ + break; + case AMD64_SEL_TYPE_SYS_CALL_GATE: + fDescOk = fIsLar; + break; + default: + fDescOk = false; + break; + } + } + else + { + switch (Desc.Long.Gen.u4Type) + { + case X86_SEL_TYPE_SYS_286_TSS_AVAIL: + case X86_SEL_TYPE_SYS_286_TSS_BUSY: + case X86_SEL_TYPE_SYS_386_TSS_AVAIL: + case X86_SEL_TYPE_SYS_386_TSS_BUSY: + case X86_SEL_TYPE_SYS_LDT: + break; + case X86_SEL_TYPE_SYS_286_CALL_GATE: + case X86_SEL_TYPE_SYS_TASK_GATE: + case X86_SEL_TYPE_SYS_386_CALL_GATE: + fDescOk = fIsLar; + break; + default: + fDescOk = false; + break; + } + } + } + if (fDescOk) + { + /* + * Check the RPL/DPL/CPL interaction.. + */ + /** @todo testcase for the conforming behavior. */ + if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) + || !Desc.Legacy.Gen.u1DescType) + { + if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl) + fDescOk = false; + else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl) + fDescOk = false; + } + } + + if (fDescOk) + { + /* + * All fine, start committing the result. + */ + if (fIsLar) + *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00); + else + *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy); + } + + } + else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK) + fDescOk = false; + else + return rcStrict; + + /* commit flags value and advance rip. */ + pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk; + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + + return VINF_SUCCESS; +} + + +/** + * Implements LAR and LSL with 16-bit operand size. + * + * @returns VINF_SUCCESS. + * @param pu16Dst Pointer to the destination register. + * @param u16Sel The selector to load details for. + * @param pEFlags Pointer to the eflags register. + * @param fIsLar true = LAR, false = LSL. + */ +IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar) +{ + uint64_t u64TmpDst = *pu16Dst; + IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar); + *pu16Dst = (uint16_t)u64TmpDst; + return VINF_SUCCESS; +} + + +/** * Implements lgdt. * - * @param iEffSeg The segment of the new ldtr contents - * @param GCPtrEffSrc The address of the new ldtr contents. + * @param iEffSeg The segment of the new gdtr contents + * @param GCPtrEffSrc The address of the new gdtr contents. * @param enmEffOpSize The effective operand size. 
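When the checks pass, LAR and LSL differ only in what they hand back, as the commit step above shows: LAR returns the descriptor's second dword masked with 0x00ffff00, LSL the limit expanded according to the granularity bit. A sketch over the raw 8-byte descriptor viewed as two dwords:

#include <stdbool.h>
#include <stdint.h>

static uint64_t lar_lsl_value(const uint32_t au32Desc[2], bool fIsLar)
{
    if (fIsLar)
        return au32Desc[1] & UINT32_C(0x00ffff00);             /* same mask as the code above */

    uint32_t cbLimit = (au32Desc[0] & 0xffffu)                 /* limit bits 15:0  */
                     | (au32Desc[1] & UINT32_C(0x000f0000));   /* limit bits 19:16 */
    if (au32Desc[1] & (UINT32_C(1) << 23))                     /* G: 4K granularity */
        cbLimit = (cbLimit << 12) | 0xfffu;
    return cbLimit;
}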
*/ IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize) @@ -2804,7 +3681,7 @@ IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, pCtx->gdtr.pGdt = GCPtrBase; } if (rcStrict == VINF_SUCCESS) - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } return rcStrict; } @@ -2827,7 +3704,7 @@ IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize); if (rcStrict == VINF_SUCCESS) - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return rcStrict; } @@ -2835,8 +3712,8 @@ IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, /** * Implements lidt. * - * @param iEffSeg The segment of the new ldtr contents - * @param GCPtrEffSrc The address of the new ldtr contents. + * @param iEffSeg The segment of the new idtr contents + * @param GCPtrEffSrc The address of the new idtr contents. * @param enmEffOpSize The effective operand size. */ IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize) @@ -2861,7 +3738,7 @@ IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, pCtx->idtr.cbIdt = cbLimit; pCtx->idtr.pIdt = GCPtrBase; } - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } return rcStrict; } @@ -2884,7 +3761,7 @@ IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize); if (rcStrict == VINF_SUCCESS) - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return rcStrict; } @@ -2929,15 +3806,25 @@ IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt) pCtx->ldtr.Sel = uNewLdt; pCtx->ldtr.ValidSel = uNewLdt; pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; - if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu)) - pCtx->ldtr.Attr.u = 0; - else + if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) { + pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; + pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verfication against REM. */ + } + else if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) + { + /* AMD-V seems to leave the base and limit alone. */ + pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; + } + else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) + { + /* VT-x (Intel 3960x) seems to be doing the following. */ + pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D; pCtx->ldtr.u64Base = 0; - pCtx->ldtr.u32Limit = 0; + pCtx->ldtr.u32Limit = UINT32_MAX; } - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -2945,7 +3832,7 @@ IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt) * Read the descriptor. */ IEMSELDESC Desc; - VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt); + VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? 
*/ if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -3000,7 +3887,7 @@ IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt) pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); pCtx->ldtr.u64Base = u64Base; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3042,7 +3929,7 @@ IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr) * Read the descriptor. */ IEMSELDESC Desc; - VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr); + VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */ if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -3092,17 +3979,17 @@ IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr) * assembly and such. */ void *pvDesc; - rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt, IEM_ACCESS_DATA_RW); + rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW); if (rcStrict != VINF_SUCCESS) return rcStrict; switch ((uintptr_t)pvDesc & 3) { case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break; case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break; - case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 16); break; - case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 8); break; + case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break; + case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break; } - rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt, IEM_ACCESS_DATA_RW); + rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW); if (rcStrict != VINF_SUCCESS) return rcStrict; Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK; @@ -3121,7 +4008,7 @@ IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr) pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); pCtx->tr.u64Base = u64Base; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3148,11 +4035,15 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg) case 3: crX = pCtx->cr3; break; case 4: crX = pCtx->cr4; break; case 8: - if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */ + { + uint8_t uTpr; + int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL); + if (RT_SUCCESS(rc)) + crX = uTpr >> 4; else - crX = 0xff; + crX = 0; break; + } IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */ } @@ -3162,7 +4053,7 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg) else *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3255,9 +4146,9 @@ IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) { uint64_t NewEFER = pCtx->msrEFER; if (uNewCrX & X86_CR0_PG) - NewEFER |= MSR_K6_EFER_LME; + NewEFER |= MSR_K6_EFER_LMA; else - NewEFER &= ~MSR_K6_EFER_LME; + NewEFER &= ~MSR_K6_EFER_LMA; if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) CPUMSetGuestEFER(pVCpu, NewEFER); @@ -3386,7 +4277,7 @@ IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) /* long mode checks. 
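The CR8 read above now goes through the virtual APIC instead of returning a placeholder: CR8 is simply the task-priority register shifted down by four bits, and, as the write path further down shows, only the low four bits of CR8 are architecturally defined. A sketch of that mapping, with plain helpers standing in for the PDMApicGetTPR/PDMApicSetTPR plumbing:

    #include <stdbool.h>
    #include <stdint.h>

    /* CR8 holds TPR[7:4]; TPR[3:0] is not visible through CR8. */
    static uint64_t Cr8FromTpr(uint8_t uTpr)
    {
        return uTpr >> 4;
    }

    /* Returns false (caller raises #GP(0)) when reserved CR8 bits are set. */
    static bool TprFromCr8(uint64_t uNewCr8, uint8_t *puTpr)
    {
        if (uNewCr8 & ~(uint64_t)0xf)
            return false;                   /* bits 63:4 are reserved */
        *puTpr = (uint8_t)(uNewCr8 << 4);
        return true;
    }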
*/ if ( (uOldCrX & X86_CR4_PAE) && !(uNewCrX & X86_CR4_PAE) - && (pCtx->msrEFER & MSR_K6_EFER_LMA) ) + && CPUMIsGuestInLongModeEx(pCtx) ) { Log(("Trying to set clear CR4.PAE while long mode is active\n")); return iemRaiseGeneralProtectionFault0(pIemCpu); @@ -3415,7 +4306,10 @@ IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) { Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n", RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) )); - VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); +#ifdef VBOX_WITH_RAW_MODE + if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu))) + VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); +#endif } /* PGM - flushing and mode. */ @@ -3436,10 +4330,15 @@ IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) * CR8 maps to the APIC TPR. */ case 8: + if (uNewCrX & ~(uint64_t)0xf) + { + Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) - IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */ - else - rcStrict = VINF_SUCCESS; + PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4); + rcStrict = VINF_SUCCESS; break; IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */ @@ -3452,7 +4351,7 @@ IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) { if (rcStrict != VINF_SUCCESS) rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } return rcStrict; @@ -3566,14 +4465,14 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg) case 6: case 4: drX = pCtx->dr[6]; - drX &= ~RT_BIT_32(12); - drX |= UINT32_C(0xffff0ff0); + drX |= X86_DR6_RA1_MASK; + drX &= ~X86_DR6_RAZ_MASK; break; case 7: case 5: drX = pCtx->dr[7]; - drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15)); - drX |= RT_BIT_32(10); + drX |=X86_DR7_RA1_MASK; + drX &= ~X86_DR7_RAZ_MASK; break; IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */ } @@ -3583,7 +4482,7 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg) else *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3605,11 +4504,14 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg) return iemRaiseGeneralProtectionFault0(pIemCpu); Assert(!pCtx->eflags.Bits.u1VM); - if ( (iDrReg == 4 || iDrReg == 5) - && (pCtx->cr4 & X86_CR4_DE) ) + if (iDrReg == 4 || iDrReg == 5) { - Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg)); - return iemRaiseGeneralProtectionFault0(pIemCpu); + if (pCtx->cr4 & X86_CR4_DE) + { + Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + iDrReg += 2; } /* Raise #DB if general access detect is enabled. 
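The DR6/DR7 reads above were rewritten to use named masks, but the effect is the same as the literal constants they replace: certain bits always read as one and others always read as zero, regardless of what was last written. A sketch using the literal values visible in the removed lines (DR6: bit 12 reads zero, the 0xffff0ff0 bits read one; DR7: bits 11, 12, 14 and 15 read zero, bit 10 reads one):

    #include <stdint.h>

    static uint64_t FixupDr6ForRead(uint64_t uDr6)
    {
        uDr6 |= UINT32_C(0xffff0ff0);                   /* read-as-one bits */
        uDr6 &= ~(uint64_t)(UINT32_C(1) << 12);         /* read-as-zero bit  */
        return uDr6;
    }

    static uint64_t FixupDr7ForRead(uint64_t uDr7)
    {
        uDr7 |= UINT32_C(1) << 10;                      /* read-as-one bit   */
        uDr7 &= ~(uint64_t)(  (UINT32_C(1) << 11) | (UINT32_C(1) << 12)
                            | (UINT32_C(1) << 14) | (UINT32_C(1) << 15)); /* read-as-zero */
        return uDr7;
    }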
*/ @@ -3643,25 +4545,23 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg) break; case 6: - case 4: - if (uNewDrX & UINT64_C(0xffffffff00000000)) + if (uNewDrX & X86_DR6_MBZ_MASK) { Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX)); return iemRaiseGeneralProtectionFault0(pIemCpu); } - uNewDrX &= ~RT_BIT_32(12); - uNewDrX |= UINT32_C(0xffff0ff0); + uNewDrX |= X86_DR6_RA1_MASK; + uNewDrX &= ~X86_DR6_RAZ_MASK; break; case 7: - case 5: - if (uNewDrX & UINT64_C(0xffffffff00000000)) + if (uNewDrX & X86_DR7_MBZ_MASK) { Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX)); return iemRaiseGeneralProtectionFault0(pIemCpu); } - uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15)); - uNewDrX |= RT_BIT_32(10); + uNewDrX |= X86_DR7_RA1_MASK; + uNewDrX &= ~X86_DR7_RAZ_MASK; break; IEM_NOT_REACHED_DEFAULT_CASE_RET(); @@ -3678,7 +4578,7 @@ IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg) else pCtx->dr[iDrReg] = uNewDrX; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3697,7 +4597,7 @@ IEM_CIMPL_DEF_1(iemCImpl_invlpg, uint8_t, GCPtrPage) Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM); int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); if (rc == VINF_SUCCESS) return VINF_SUCCESS; @@ -3740,7 +4640,7 @@ IEM_CIMPL_DEF_0(iemCImpl_rdtsc) pIemCpu->fIgnoreRaxRdx = true; #endif - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3767,6 +4667,11 @@ IEM_CIMPL_DEF_0(iemCImpl_rdmsr) int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u); if (rc != VINF_SUCCESS) { +#ifdef IN_RING3 + static uint32_t s_cTimes = 0; + if (s_cTimes++ < 10) + LogRel(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx)); +#endif Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx)); AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS); return iemRaiseGeneralProtectionFault0(pIemCpu); @@ -3775,7 +4680,7 @@ IEM_CIMPL_DEF_0(iemCImpl_rdmsr) pCtx->rax = uValue.s.Lo; pCtx->rdx = uValue.s.Hi; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3802,15 +4707,30 @@ IEM_CIMPL_DEF_0(iemCImpl_wrmsr) uValue.s.Lo = pCtx->eax; uValue.s.Hi = pCtx->edx; - int rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u); + int rc; + if (!IEM_VERIFICATION_ENABLED(pIemCpu)) + rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u); + else + { + CPUMCTX CtxTmp = *pCtx; + rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u); + PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu)); + *pCtx = *pCtx2; + *pCtx2 = CtxTmp; + } if (rc != VINF_SUCCESS) { +#ifdef IN_RING3 + static uint32_t s_cTimes = 0; + if (s_cTimes++ < 10) + LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo)); +#endif Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo)); AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS); return iemRaiseGeneralProtectionFault0(pIemCpu); } - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -3837,7 +4757,7 @@ IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg) */ uint32_t u32Value; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = 
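The mov-to-DR path above handles the legacy aliases explicitly: when CR4.DE is clear, DR4 and DR5 are treated as DR6 and DR7 (hence the iDrReg += 2), and when CR4.DE is set the access faults instead, which in this implementation is reported as #GP(0). A minimal sketch of that dispatch, with a boolean fault result in place of the IEM exception helpers:

    #include <stdbool.h>

    /* Map a debug register number the way the mov-to-DR path above does.
     * Returns false when the access should fault (CR4.DE set and DR4/DR5 used). */
    static bool ResolveDrAlias(unsigned iDrReg, bool fCr4De, unsigned *piEffective)
    {
        if (iDrReg == 4 || iDrReg == 5)
        {
            if (fCr4De)
                return false;       /* debug extensions enabled: DR4/DR5 are reserved */
            iDrReg += 2;            /* legacy alias: DR4 -> DR6, DR5 -> DR7 */
        }
        *piEffective = iDrReg;
        return true;
    }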
IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg); + rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg); else rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg); if (IOM_SUCCESS(rcStrict)) @@ -3849,10 +4769,25 @@ IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg) case 4: pCtx->rax = u32Value; break; default: AssertFailedReturn(VERR_INTERNAL_ERROR_3); } - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); pIemCpu->cPotentialExits++; if (rcStrict != VINF_SUCCESS) rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); + Assert(rcStrict == VINF_SUCCESS); /* assumed below */ + + /* + * Check for I/O breakpoints. + */ + uint32_t const uDr7 = pCtx->dr[7]; + if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) + && X86_DR7_ANY_RW_IO(uDr7) + && (pCtx->cr4 & X86_CR4_DE)) + || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu)))) + { + rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg); + if (rcStrict == VINF_EM_RAW_GUEST_TRAP) + rcStrict = iemRaiseDebugException(pIemCpu); + } } return rcStrict; @@ -3899,15 +4834,30 @@ IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg) default: AssertFailedReturn(VERR_INTERNAL_ERROR_3); } if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg); + rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg); else rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg); if (IOM_SUCCESS(rcStrict)) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); pIemCpu->cPotentialExits++; if (rcStrict != VINF_SUCCESS) rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); + Assert(rcStrict == VINF_SUCCESS); /* assumed below */ + + /* + * Check for I/O breakpoints. + */ + uint32_t const uDr7 = pCtx->dr[7]; + if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) + && X86_DR7_ANY_RW_IO(uDr7) + && (pCtx->cr4 & X86_CR4_DE)) + || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu)))) + { + rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg); + if (rcStrict == VINF_EM_RAW_GUEST_TRAP) + rcStrict = iemRaiseDebugException(pIemCpu); + } } return rcStrict; } @@ -3961,7 +4911,7 @@ IEM_CIMPL_DEF_0(iemCImpl_cli) /* Commit. */ IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld); return VINF_SUCCESS; } @@ -4007,8 +4957,8 @@ IEM_CIMPL_DEF_0(iemCImpl_sti) /* Commit. */ IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl); - iemRegAddToRip(pIemCpu, cbInstr); - if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu)) + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip); Log2(("STI: %#x -> %#x\n", fEflOld, fEfl)); return VINF_SUCCESS; @@ -4022,12 +4972,151 @@ IEM_CIMPL_DEF_0(iemCImpl_hlt) { if (pIemCpu->uCpl != 0) return iemRaiseGeneralProtectionFault0(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_EM_HALT; } /** + * Implements 'MONITOR'. + */ +IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg) +{ + /* + * Permission checks. 
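Both the IN and OUT paths above gained the same post-commit check: a debug exception is only considered when DR7 has at least one enabled breakpoint whose R/W field selects I/O (10b) and CR4.DE is set, or when the debugger has armed a hardware I/O breakpoint behind the guest's back (the DBGFBpIsHwIoArmed side, not shown here). A sketch of the guest DR7 side of that test, written against the architectural bit layout rather than the X86_DR7_* macros:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when guest DR7 alone would make an I/O access a breakpoint candidate.
     * Layout: L0/G0..L3/G3 in bits 0..7, R/W0..R/W3 at bits 16/17, 20/21, 24/25, 28/29. */
    static bool IsIoBreakpointArmed(uint32_t uDr7, bool fCr4De)
    {
        if (!fCr4De)
            return false;                               /* I/O breakpoints require CR4.DE */
        for (unsigned iBp = 0; iBp < 4; iBp++)
        {
            bool     fEnabled = ((uDr7 >> (iBp * 2)) & 3) != 0;     /* L or G bit set */
            unsigned uRw      =  (uDr7 >> (16 + iBp * 4)) & 3;      /* 10b = I/O read/write */
            if (fEnabled && uRw == 2)
                return true;
        }
        return false;
    }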
+ */ + if (pIemCpu->uCpl != 0) + { + Log2(("monitor: CPL != 0\n")); + return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */ + } + if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR)) + { + Log2(("monitor: Not in CPUID\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + + /* + * Gather the operands and validate them. + */ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax; + uint32_t uEcx = pCtx->ecx; + uint32_t uEdx = pCtx->edx; +/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or + * \#GP first. */ + if (uEcx != 0) + { + Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + + VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + + RTGCPHYS GCPhysMem; + rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + + /* + * Call EM to prepare the monitor/wait. + */ + rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem); + Assert(rcStrict == VINF_SUCCESS); + + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return rcStrict; +} + + +/** + * Implements 'MWAIT'. + */ +IEM_CIMPL_DEF_0(iemCImpl_mwait) +{ + /* + * Permission checks. + */ + if (pIemCpu->uCpl != 0) + { + Log2(("mwait: CPL != 0\n")); + /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check + * EFLAGS.VM then.) */ + return iemRaiseUndefinedOpcode(pIemCpu); + } + if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR)) + { + Log2(("mwait: Not in CPUID\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + + /* + * Gather the operands and validate them. + */ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + uint32_t uEax = pCtx->eax; + uint32_t uEcx = pCtx->ecx; + if (uEcx != 0) + { + /* Only supported extension is break on IRQ when IF=0. */ + if (uEcx > 1) + { + Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + uint32_t fMWaitFeatures = 0; + uint32_t uIgnore = 0; + CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore); + if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0)) + != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0)) + { + Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx)); + return iemRaiseGeneralProtectionFault0(pIemCpu); + } + } + + /* + * Call EM to prepare the monitor/wait. + */ + VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx); + + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return rcStrict; +} + + +/** + * Implements 'SWAPGS'. + */ +IEM_CIMPL_DEF_0(iemCImpl_swapgs) +{ + Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */ + + /* + * Permission checks. + */ + if (pIemCpu->uCpl != 0) + { + Log2(("swapgs: CPL != 0\n")); + return iemRaiseUndefinedOpcode(pIemCpu); + } + + /* + * Do the job. 
+ */ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE; + pCtx->msrKERNELGSBASE = pCtx->gs.u64Base; + pCtx->gs.u64Base = uOtherGsBase; + + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return VINF_SUCCESS; +} + + +/** * Implements 'CPUID'. */ IEM_CIMPL_DEF_0(iemCImpl_cpuid) @@ -4040,7 +5129,7 @@ IEM_CIMPL_DEF_0(iemCImpl_cpuid) pCtx->rcx &= UINT32_C(0xffffffff); pCtx->rdx &= UINT32_C(0xffffffff); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4048,7 +5137,7 @@ IEM_CIMPL_DEF_0(iemCImpl_cpuid) /** * Implements 'AAD'. * - * @param enmEffOpSize The effective operand size. + * @param bImm The immediate operand. */ IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm) { @@ -4061,7 +5150,7 @@ IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm) X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF | X86_EFL_AF | X86_EFL_CF); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4084,7 +5173,76 @@ IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm) X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF | X86_EFL_AF | X86_EFL_CF); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return VINF_SUCCESS; +} + + +/** + * Implements 'DAA'. + */ +IEM_CIMPL_DEF_0(iemCImpl_daa) +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + + uint8_t const al = pCtx->al; + bool const fCarry = pCtx->eflags.Bits.u1CF; + + if ( pCtx->eflags.Bits.u1AF + || (al & 0xf) >= 10) + { + pCtx->al = al + 6; + pCtx->eflags.Bits.u1AF = 1; + } + else + pCtx->eflags.Bits.u1AF = 0; + + if (al >= 0x9a || fCarry) + { + pCtx->al += 0x60; + pCtx->eflags.Bits.u1CF = 1; + } + else + pCtx->eflags.Bits.u1CF = 0; + + iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + return VINF_SUCCESS; +} + + +/** + * Implements 'DAS'. + */ +IEM_CIMPL_DEF_0(iemCImpl_das) +{ + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + + uint8_t const uInputAL = pCtx->al; + bool const fCarry = pCtx->eflags.Bits.u1CF; + + if ( pCtx->eflags.Bits.u1AF + || (uInputAL & 0xf) >= 10) + { + pCtx->eflags.Bits.u1AF = 1; + if (uInputAL < 6) + pCtx->eflags.Bits.u1CF = 1; + pCtx->al = uInputAL - 6; + } + else + { + pCtx->eflags.Bits.u1AF = 0; + pCtx->eflags.Bits.u1CF = 0; + } + + if (uInputAL >= 0x9a || fCarry) + { + pCtx->al -= 0x60; + pCtx->eflags.Bits.u1CF = 1; + } + + iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4177,7 +5335,7 @@ IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts) } iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4274,7 +5432,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, e if (rcStrict != VINF_SUCCESS) return rcStrict; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4387,7 +5545,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, return rcStrict; iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4543,7 +5701,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCP return rcStrict; /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! 
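The new DAA implementation above follows the textbook decimal-adjust recipe: add 6 to AL when the low nibble overflows (or AF is already set), then add 0x60 when the original AL exceeds 0x99 (or CF was set), updating AF and CF accordingly while SF/ZF/PF are recomputed from the result. For reference, a compact, self-contained version of the same algorithm; the flag inputs and outputs are plain booleans here rather than EFLAGS bits:

    #include <stdbool.h>
    #include <stdint.h>

    /* Decimal-adjust AL after addition, mirroring the iemCImpl_daa logic above. */
    static uint8_t DecimalAdjustAfterAdd(uint8_t al, bool *pfAf, bool *pfCf)
    {
        uint8_t const alIn   = al;      /* original AL, used for the 0x9a comparison */
        bool const    fCarry = *pfCf;

        if (*pfAf || (alIn & 0xf) >= 10)
        {
            al    = (uint8_t)(alIn + 6);
            *pfAf = true;
        }
        else
            *pfAf = false;

        if (alIn >= 0x9a || fCarry)
        {
            al  = (uint8_t)(al + 0x60);
            *pfCf = true;
        }
        else
            *pfCf = false;

        return al;                      /* SF/ZF/PF would be recomputed from this value */
    }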
*/ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4591,7 +5749,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPT pCtx->fpu.FOP = 0; iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4620,7 +5778,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPT return rcStrict; iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4655,7 +5813,7 @@ IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPT return rcStrict; iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4679,7 +5837,7 @@ IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw) /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */ iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4728,7 +5886,7 @@ IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg) iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx); iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -4797,7 +5955,7 @@ IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pf iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx); iemHlpUsedFpu(pIemCpu); - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } diff --git a/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h b/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h index 40892880..46932559 100644 --- a/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h +++ b/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h @@ -47,11 +47,20 @@ # define ADDR_rSI rsi # define ADDR_rCX rcx # define ADDR2_TYPE uint64_t +# define IS_64_BIT_CODE(a_pIemCpu) (true) #else # error "Bad ADDR_SIZE." #endif #define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t) +#if ADDR_SIZE == 64 || OP_SIZE == 64 +# define IS_64_BIT_CODE(a_pIemCpu) (true) +#elif ADDR_SIZE == 32 +# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT) +#else +# define IS_64_BIT_CODE(a_pIemCpu) (false) +#endif + /** * Implements 'REPE CMPS'. @@ -66,16 +75,18 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg); - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg); + uint64_t uSrc1Base; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base); if (rcStrict != VINF_SUCCESS) return rcStrict; - rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uSrc2Base; + rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -92,13 +103,8 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8 /* * Do segmentation and virtual page stuff. 
*/ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg; - ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg; -#else - uint64_t uVirtSrc1Addr = uSrc1AddrReg; - uint64_t uVirtSrc2Addr = uSrc2AddrReg; -#endif + ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base; + ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base; uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftSrc1Page > uCounterReg) cLeftSrc1Page = uCounterReg; @@ -106,13 +112,13 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page); if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ - && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uSrc1AddrReg < pSrc1Hid->u32Limit - && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit - && uSrc2AddrReg < pCtx->es.u32Limit - && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && cbIncr > 0 /** @todo Optimize reverse direction string ops. */ + && ( IS_64_BIT_CODE(pIemCpu) + || ( uSrc1AddrReg < pSrc1Hid->u32Limit + && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit + && uSrc2AddrReg < pCtx->es.u32Limit + && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysSrc1Mem; @@ -173,8 +179,8 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); continue; } + iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); } - iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); } /* @@ -207,7 +213,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8 /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -225,16 +231,18 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg); - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg); + uint64_t uSrc1Base; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base); if (rcStrict != VINF_SUCCESS) return rcStrict; - rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uSrc2Base; + rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -251,13 +259,8 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint /* * Do segmentation and virtual page stuff. 
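The string-instruction template above keeps switching between a fast per-page path and a per-element fallback; the fast path only covers the stretch of elements that stays within the current guest page, which is what the cLeftPage computation works out before the limit and direction checks. A sketch of that calculation, assuming 4 KiB pages and a non-zero element size in bytes (constant names here are stand-ins for the PAGE_SIZE/PAGE_OFFSET_MASK macros):

    #include <stdint.h>

    #define PAGE_SIZE_SKETCH        4096u
    #define PAGE_OFFSET_MASK_SKETCH (PAGE_SIZE_SKETCH - 1)

    /* How many elements of cbElem bytes can be processed before crossing a page
     * boundary, capped by the remaining rCX count.  cbElem must be non-zero. */
    static uint32_t ElementsLeftOnPage(uint64_t uVirtAddr, uint32_t cbElem, uint64_t cCounter)
    {
        uint32_t cLeftPage = (PAGE_SIZE_SKETCH - (uint32_t)(uVirtAddr & PAGE_OFFSET_MASK_SKETCH)) / cbElem;
        if (cLeftPage > cCounter)
            cLeftPage = (uint32_t)cCounter;
        return cLeftPage;
    }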
*/ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg; - ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg; -#else - uint64_t uVirtSrc1Addr = uSrc1AddrReg; - uint64_t uVirtSrc2Addr = uSrc2AddrReg; -#endif + ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base; + ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base; uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftSrc1Page > uCounterReg) cLeftSrc1Page = uCounterReg; @@ -265,13 +268,13 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page); if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ - && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uSrc1AddrReg < pSrc1Hid->u32Limit - && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit - && uSrc2AddrReg < pCtx->es.u32Limit - && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && cbIncr > 0 /** @todo Optimize reverse direction string ops. */ + && ( IS_64_BIT_CODE(pIemCpu) + || ( uSrc1AddrReg < pSrc1Hid->u32Limit + && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit + && uSrc2AddrReg < pCtx->es.u32Limit + && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysSrc1Mem; @@ -366,7 +369,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -384,11 +387,12 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uBaseAddr; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -405,20 +409,16 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uAddrReg < pCtx->es.u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pCtx->es.u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -491,7 +491,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) /* * Done. 
*/ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -509,11 +509,12 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uBaseAddr; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -530,20 +531,16 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uAddrReg < pCtx->es.u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pCtx->es.u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -615,7 +612,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -635,16 +632,18 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg); - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg); + uint64_t uSrcBase; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase); if (rcStrict != VINF_SUCCESS) return rcStrict; - rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uDstBase; + rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -684,13 +683,8 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg; - ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg; -#else - uint64_t uVirtSrcAddr = uSrcAddrReg; - uint64_t uVirtDstAddr = uDstAddrReg; -#endif + ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase; + ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase; uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftSrcPage > uCounterReg) cLeftSrcPage = uCounterReg; @@ -699,12 +693,12 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ -#if ADDR_SIZE != 64 - && uSrcAddrReg < pSrcHid->u32Limit - && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit - && uDstAddrReg < pCtx->es.u32Limit - && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uSrcAddrReg < pSrcHid->u32Limit + && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit + && uDstAddrReg < pCtx->es.u32Limit + && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysSrcMem; @@ -781,7 +775,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -799,11 +793,12 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE)) ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } - VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uBaseAddr; + VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -829,20 +824,16 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE)) /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uAddrReg < pCtx->es.u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pCtx->es.u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -904,7 +895,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE)) /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -922,12 +913,13 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg) ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg); - VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg); + uint64_t uBaseAddr; + VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -942,20 +934,16 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg) /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ -#if ADDR_SIZE != 64 - && uAddrReg < pSrcHid->u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pSrcHid->u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -1019,7 +1007,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg) /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -1029,7 +1017,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg) /** * Implements 'INS' (no rep) */ -IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE)) +IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked) { PVM pVM = IEMCPU_TO_VM(pIemCpu); PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); @@ -1049,18 +1037,21 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE)) * segmentation and finally any #PF due to virtual address translation. * ASSUMES nothing is read from the I/O port before traps are taken. */ - rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8); - if (rcStrict != VINF_SUCCESS) - return rcStrict; + if (!fIoChecked) + { + rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } OP_TYPE *puMem; rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W); if (rcStrict != VINF_SUCCESS) return rcStrict; - uint32_t u32Value; + uint32_t u32Value = 0; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8); + rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8); if (IOM_SUCCESS(rcStrict)) @@ -1072,7 +1063,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE)) pCtx->ADDR_rDI += OP_SIZE / 8; else pCtx->ADDR_rDI -= OP_SIZE / 8; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); } /* iemMemMap already check permissions, so this may only be real errors or access handlers medling. The access handler case is going to @@ -1088,27 +1079,33 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE)) /** * Implements 'REP INS'. */ -IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) +IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked) { - PVM pVM = IEMCPU_TO_VM(pIemCpu); - PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + PVM pVM = IEMCPU_TO_VM(pIemCpu); + PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); /* * Setup. 
*/ uint16_t const u16Port = pCtx->dx; - VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8); - if (rcStrict != VINF_SUCCESS) - return rcStrict; + VBOXSTRICTRC rcStrict; + if (!fIoChecked) + { + rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } - rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES); + uint64_t uBaseAddr; + rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1132,20 +1129,16 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. */ -#if ADDR_SIZE != 64 - && uAddrReg < pCtx->es.u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pCtx->es.u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -1161,7 +1154,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) */ /** @todo Change the I/O manager interface to make use of * mapped buffers instead of leaving those bits to the - * device implementation? */ + * device implementation! 
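The iemHlpCheckPortIOPermission call above (now skippable via the new fIoChecked parameter once the instruction decoder has already performed it) is not shown in this hunk, but the rule it implements is the standard protected-mode one: when CPL > IOPL, or in V86 mode, the port must be clear in the TSS I/O permission bitmap, one bit per port, located at the offset stored in the 16-bit field at TSS offset 0x66. A hedged sketch of that bitmap lookup, reading from a plain byte buffer instead of guest memory; the function and parameter names are illustrative only:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Check an I/O permission bitmap for ports uPort..uPort+cb-1; every covering
     * bit must be 0.  pabTss/cbTss stand in for a mapped 32-bit TSS. */
    static bool IsPortAccessAllowed(const uint8_t *pabTss, size_t cbTss, uint16_t uPort, uint8_t cb)
    {
        if (cbTss < 0x68)
            return false;                               /* no room for the bitmap base field */
        uint16_t offBitmap = (uint16_t)(pabTss[0x66] | (pabTss[0x67] << 8));
        for (unsigned i = 0; i < cb; i++)
        {
            uint32_t uBit    = (uint32_t)uPort + i;
            size_t   offByte = (size_t)offBitmap + (uBit >> 3);
            if (offByte >= cbTss)
                return false;                           /* beyond the TSS limit: deny */
            if (pabTss[offByte] & (1u << (uBit & 7)))
                return false;                           /* bit set: access denied */
        }
        return true;
    }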
*/ PGMPAGEMAPLOCK PgLockMem; OP_TYPE *puMem; rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem); @@ -1172,7 +1165,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) { uint32_t u32Value; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); + rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); if (IOM_SUCCESS(rcStrict)) @@ -1184,9 +1177,11 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) if (rcStrict != VINF_SUCCESS) { if (IOM_SUCCESS(rcStrict)) + { rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); - if (uCounterReg == 0) - iemRegAddToRip(pIemCpu, cbInstr); + if (uCounterReg == 0) + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + } iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem); return rcStrict; } @@ -1221,9 +1216,9 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) if (rcStrict != VINF_SUCCESS) return rcStrict; - uint32_t u32Value; + uint32_t u32Value = 0; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8); + rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8); if (!IOM_SUCCESS(rcStrict)) @@ -1239,10 +1234,9 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) cLeftPage--; if (rcStrict != VINF_SUCCESS) { - if (IOM_SUCCESS(rcStrict)) - rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); if (uCounterReg == 0) - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); return rcStrict; } } while ((int32_t)cLeftPage > 0); @@ -1251,7 +1245,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -1259,7 +1253,7 @@ IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE)) /** * Implements 'OUTS' (no rep) */ -IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) +IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked) { PVM pVM = IEMCPU_TO_VM(pIemCpu); PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); @@ -1270,16 +1264,19 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, i * segmentation and finally any #PF due to virtual address translation. * ASSUMES nothing is read from the I/O port before traps are taken. 
*/ - rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8); - if (rcStrict != VINF_SUCCESS) - return rcStrict; + if (!fIoChecked) + { + rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } OP_TYPE uValue; rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI); if (rcStrict == VINF_SUCCESS) { if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8); + rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8); if (IOM_SUCCESS(rcStrict)) @@ -1288,7 +1285,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, i pCtx->ADDR_rSI += OP_SIZE / 8; else pCtx->ADDR_rSI -= OP_SIZE / 8; - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); if (rcStrict != VINF_SUCCESS) rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); } @@ -1300,28 +1297,34 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, i /** * Implements 'REP OUTS'. */ -IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) +IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked) { - PVM pVM = IEMCPU_TO_VM(pIemCpu); - PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); + PVM pVM = IEMCPU_TO_VM(pIemCpu); + PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); + PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); /* * Setup. */ uint16_t const u16Port = pCtx->dx; - VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8); - if (rcStrict != VINF_SUCCESS) - return rcStrict; + VBOXSTRICTRC rcStrict; + if (!fIoChecked) + { + rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8); + if (rcStrict != VINF_SUCCESS) + return rcStrict; + } ADDR_TYPE uCounterReg = pCtx->ADDR_rCX; if (uCounterReg == 0) { - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg); - rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg); + uint64_t uBaseAddr; + rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr); if (rcStrict != VINF_SUCCESS) return rcStrict; @@ -1336,20 +1339,16 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ /* * Do segmentation and virtual page stuff. */ -#if ADDR_SIZE != 64 - ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg; -#else - uint64_t uVirtAddr = uAddrReg; -#endif + ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr; uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8); if (cLeftPage > uCounterReg) cLeftPage = uCounterReg; if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ -#if ADDR_SIZE != 64 - && uAddrReg < pHid->u32Limit - && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit -#endif + && ( IS_64_BIT_CODE(pIemCpu) + || ( uAddrReg < pHid->u32Limit + && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit) + ) ) { RTGCPHYS GCPhysMem; @@ -1376,7 +1375,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ { uint32_t u32Value = *puMem++; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8); + rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8); if (IOM_SUCCESS(rcStrict)) @@ -1387,9 +1386,11 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ if (rcStrict != VINF_SUCCESS) { if (IOM_SUCCESS(rcStrict)) + { rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); - if (uCounterReg == 0) - iemRegAddToRip(pIemCpu, cbInstr); + if (uCounterReg == 0) + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); + } iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); return rcStrict; } @@ -1425,7 +1426,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ return rcStrict; if (!IEM_VERIFICATION_ENABLED(pIemCpu)) - rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8); + rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8); else rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8); if (IOM_SUCCESS(rcStrict)) @@ -1437,9 +1438,11 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ if (rcStrict != VINF_SUCCESS) { if (IOM_SUCCESS(rcStrict)) + { + if (uCounterReg == 0) + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); - if (uCounterReg == 0) - iemRegAddToRip(pIemCpu, cbInstr); + } return rcStrict; } } while ((int32_t)cLeftPage > 0); @@ -1448,7 +1451,7 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ /* * Done. */ - iemRegAddToRip(pIemCpu, cbInstr); + iemRegAddToRipAndClearRF(pIemCpu, cbInstr); return VINF_SUCCESS; } @@ -1465,4 +1468,4 @@ IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_ #undef ADDR2_TYPE #undef ADDR_TYPE #undef ADDR2_TYPE - +#undef IS_64_BIT_CODE diff --git a/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h b/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h index 5a5553cc..3e6fcfba 100644 --- a/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h +++ b/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2011-2012 Oracle Corporation + * Copyright (C) 2011-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -66,7 +66,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_EFLAGS(EFlags); @@ -129,6 +129,8 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + if (pImpl != &g_iemAImpl_test) + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); break; @@ -166,7 +168,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_EFLAGS(EFlags); @@ -188,7 +190,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_EFLAGS(EFlags); @@ -210,7 +212,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_EFLAGS(EFlags); @@ -270,7 +272,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG(uint32_t *, pEFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_EFLAGS(pEFlags); @@ -327,6 +329,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); break; @@ -361,7 +364,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG(uint32_t *, pEFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_EFLAGS(pEFlags); @@ -378,12 +381,13 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG(uint32_t *, pEFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - 
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); break; @@ -395,7 +399,7 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl) IEM_MC_ARG(uint32_t *, pEFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_EFLAGS(pEFlags); @@ -479,6 +483,8 @@ FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + if (pImpl != &g_iemAImpl_test) + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -538,7 +544,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm) if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) { - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); switch (pIemCpu->enmEffOpSize) { case IEMMODE_16BIT: @@ -576,8 +582,8 @@ FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Ldtr); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); + IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); IEM_MC_FETCH_LDTR_U16(u16Ldtr); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr); IEM_MC_ADVANCE_RIP(); @@ -595,7 +601,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm) if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) { - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); switch (pIemCpu->enmEffOpSize) { case IEMMODE_16BIT: @@ -633,8 +639,8 @@ FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Tr); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); + IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); IEM_MC_FETCH_TR_U16(u16Tr); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr); IEM_MC_ADVANCE_RIP(); @@ -652,7 +658,7 @@ FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm) if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) { - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS); IEM_MC_BEGIN(1, 0); IEM_MC_ARG(uint16_t, u16Sel, 0); IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); @@ -664,9 +670,9 @@ FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint16_t, u16Sel, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); - 
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS); + IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */ IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel); IEM_MC_END(); @@ -695,9 +701,9 @@ FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint16_t, u16Sel, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */ IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel); IEM_MC_END(); @@ -706,12 +712,51 @@ } +/** Opcode 0x0f 0x00 /4, /5. Common worker for verr and verw. */ +FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite) +{ + IEMOP_HLP_NO_REAL_OR_V86_MODE(); + + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint16_t, u16Sel, 0); + IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1); + IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg); + IEM_MC_END(); + } + else + { + IEM_MC_BEGIN(2, 1); + IEM_MC_ARG(uint16_t, u16Sel, 0); + IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); + IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg); + IEM_MC_END(); + } + return VINF_SUCCESS; +} + + /** Opcode 0x0f 0x00 /4. */ -FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm); +FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm) +{ + IEMOP_MNEMONIC("verr Ew"); + return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false); +} /** Opcode 0x0f 0x00 /5. */ -FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm); +FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm) +{ + IEMOP_MNEMONIC("verw Ew"); + return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true); +} /** Opcode 0x0f 0x00.
*/ @@ -740,11 +785,12 @@ FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm) IEMOP_MNEMONIC("sgdt Ms"); IEMOP_HLP_64BIT_OP_SIZE(); IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); IEM_MC_END(); return VINF_SUCCESS; @@ -789,11 +835,12 @@ FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm) IEMOP_MNEMONIC("sidt Ms"); IEMOP_HLP_64BIT_OP_SIZE(); IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_sidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); IEM_MC_END(); return VINF_SUCCESS; @@ -803,32 +850,33 @@ FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm) /** Opcode 0x0f 0x01 /1. */ FNIEMOP_DEF(iemOp_Grp7_monitor) { - NOREF(pIemCpu); - IEMOP_BITCH_ABOUT_STUB(); - return VERR_IEM_INSTR_NOT_IMPLEMENTED; + IEMOP_MNEMONIC("monitor"); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */ + return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg); } /** Opcode 0x0f 0x01 /1. */ FNIEMOP_DEF(iemOp_Grp7_mwait) { - NOREF(pIemCpu); - IEMOP_BITCH_ABOUT_STUB(); - return VERR_IEM_INSTR_NOT_IMPLEMENTED; + IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait); } /** Opcode 0x0f 0x01 /2. */ FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm) { - IEMOP_HLP_NO_LOCK_PREFIX(); - + IEMOP_MNEMONIC("lgdt"); IEMOP_HLP_64BIT_OP_SIZE(); IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); IEM_MC_END(); return VINF_SUCCESS; @@ -854,16 +902,16 @@ FNIEMOP_DEF(iemOp_Grp7_xsetbv) /** Opcode 0x0f 0x01 /3. */ FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm) { - IEMOP_HLP_NO_LOCK_PREFIX(); - IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT ? 
IEMMODE_64BIT : pIemCpu->enmEffOpSize; IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); IEM_MC_END(); return VINF_SUCCESS; @@ -897,6 +945,7 @@ FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga); /** Opcode 0x0f 0x01 /4. */ FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm) { + IEMOP_MNEMONIC("smsw"); IEMOP_HLP_NO_LOCK_PREFIX(); if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) { @@ -938,7 +987,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_CR0_U16(u16Tmp); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp); IEM_MC_ADVANCE_RIP(); @@ -953,6 +1002,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm) { /* The operand size is effectively ignored, all is 16-bit and only the lower 3-bits are used. */ + IEMOP_MNEMONIC("lmsw"); IEMOP_HLP_NO_LOCK_PREFIX(); if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) { @@ -967,7 +1017,7 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint16_t, u16Tmp, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp); IEM_MC_END(); @@ -979,10 +1029,11 @@ FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm) /** Opcode 0x0f 0x01 /7. */ FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm) { + IEMOP_MNEMONIC("invlpg"); IEMOP_HLP_NO_LOCK_PREFIX(); IEM_MC_BEGIN(1, 1); IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst); IEM_MC_END(); return VINF_SUCCESS; @@ -992,9 +1043,10 @@ FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm) /** Opcode 0x0f 0x01 /7. */ FNIEMOP_DEF(iemOp_Grp7_swapgs) { - NOREF(pIemCpu); - IEMOP_BITCH_ABOUT_STUB(); - return VERR_IEM_INSTR_NOT_IMPLEMENTED; + IEMOP_MNEMONIC("swapgs"); + IEMOP_HLP_NO_LOCK_PREFIX(); + IEMOP_HLP_ONLY_64BIT(); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs); } @@ -1084,13 +1136,132 @@ FNIEMOP_DEF(iemOp_Grp7) } } +/** Opcode 0x0f 0x00 /3. */ +FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar) +{ + IEMOP_HLP_NO_REAL_OR_V86_MODE(); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + IEMOP_HLP_DECODED_NL_2(fIsLar ? 
OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); + switch (pIemCpu->enmEffOpSize) + { + case IEMMODE_16BIT: + { + IEM_MC_BEGIN(4, 0); + IEM_MC_ARG(uint16_t *, pu16Dst, 0); + IEM_MC_ARG(uint16_t, u16Sel, 1); + IEM_MC_ARG(uint32_t *, pEFlags, 2); + IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); + + IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_REF_EFLAGS(pEFlags); + IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg); + + IEM_MC_END(); + return VINF_SUCCESS; + } + + case IEMMODE_32BIT: + case IEMMODE_64BIT: + { + IEM_MC_BEGIN(4, 0); + IEM_MC_ARG(uint64_t *, pu64Dst, 0); + IEM_MC_ARG(uint16_t, u16Sel, 1); + IEM_MC_ARG(uint32_t *, pEFlags, 2); + IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); + + IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_REF_EFLAGS(pEFlags); + IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg); + + IEM_MC_END(); + return VINF_SUCCESS; + } + + IEM_NOT_REACHED_DEFAULT_CASE_RET(); + } + } + else + { + switch (pIemCpu->enmEffOpSize) + { + case IEMMODE_16BIT: + { + IEM_MC_BEGIN(4, 1); + IEM_MC_ARG(uint16_t *, pu16Dst, 0); + IEM_MC_ARG(uint16_t, u16Sel, 1); + IEM_MC_ARG(uint32_t *, pEFlags, 2); + IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); + + IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_EFLAGS(pEFlags); + IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg); + + IEM_MC_END(); + return VINF_SUCCESS; + } + + case IEMMODE_32BIT: + case IEMMODE_64BIT: + { + IEM_MC_BEGIN(4, 1); + IEM_MC_ARG(uint64_t *, pu64Dst, 0); + IEM_MC_ARG(uint16_t, u16Sel, 1); + IEM_MC_ARG(uint32_t *, pEFlags, 2); + IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); + + IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_EFLAGS(pEFlags); + IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg); + + IEM_MC_END(); + return VINF_SUCCESS; + } + + IEM_NOT_REACHED_DEFAULT_CASE_RET(); + } + } +} + + /** Opcode 0x0f 0x02. */ -FNIEMOP_STUB(iemOp_lar_Gv_Ew); +FNIEMOP_DEF(iemOp_lar_Gv_Ew) +{ + IEMOP_MNEMONIC("lar Gv,Ew"); + return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true); +} + + /** Opcode 0x0f 0x03. */ -FNIEMOP_STUB(iemOp_lsl_Gv_Ew); +FNIEMOP_DEF(iemOp_lsl_Gv_Ew) +{ + IEMOP_MNEMONIC("lsl Gv,Ew"); + return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false); +} + + /** Opcode 0x0f 0x04. 
*/ -FNIEMOP_STUB(iemOp_syscall); +FNIEMOP_DEF(iemOp_syscall) +{ + IEMOP_MNEMONIC("syscall"); + IEMOP_HLP_NO_LOCK_PREFIX(); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall); +} /** Opcode 0x0f 0x05. */ @@ -1103,7 +1274,14 @@ FNIEMOP_DEF(iemOp_clts) /** Opcode 0x0f 0x06. */ -FNIEMOP_STUB(iemOp_sysret); +FNIEMOP_DEF(iemOp_sysret) +{ + IEMOP_MNEMONIC("sysret"); + IEMOP_HLP_NO_LOCK_PREFIX(); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret); +} + + /** Opcode 0x0f 0x08. */ FNIEMOP_STUB(iemOp_invd); @@ -1158,7 +1336,7 @@ FNIEMOP_DEF(iemOp_nop_Ev_GrpP) IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); /* Currently a NOP. */ IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -1291,17 +1469,17 @@ FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd) /** Opcode 0x0f 0x11. */ FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd); /** Opcode 0x0f 0x12. */ -FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); +FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT /** Opcode 0x0f 0x13. */ -FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); +FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT /** Opcode 0x0f 0x14. */ FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq); /** Opcode 0x0f 0x15. */ FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq); /** Opcode 0x0f 0x16. */ -FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); +FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT /** Opcode 0x0f 0x17. */ -FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); +FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT /** Opcode 0x0f 0x18. */ @@ -1326,7 +1504,7 @@ FNIEMOP_DEF(iemOp_prefetch_Grp16) IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); /* Currently a NOP. */ IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -1352,7 +1530,7 @@ FNIEMOP_DEF(iemOp_nop_Ev) { IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); /* Currently a NOP. */ IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -1377,7 +1555,7 @@ FNIEMOP_DEF(iemOp_mov_Rd_Cd) { /* The lock prefix can be used to encode CR8 accesses on some CPUs. */ if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L)) - return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */ + return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */ iCrReg |= 8; } switch (iCrReg) @@ -1423,7 +1601,7 @@ FNIEMOP_DEF(iemOp_mov_Cd_Rd) { /* The lock prefix can be used to encode CR8 accesses on some CPUs. */ if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L)) - return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */ + return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */ iCrReg |= 8; } switch (iCrReg) @@ -1476,15 +1654,15 @@ FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd); /** Opcode 0x0f 0x29. */ FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd); /** Opcode 0x0f 0x2a. 
*/ -FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); +FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT /** Opcode 0x0f 0x2b. */ -FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd); +FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd); //NEXT:XP /** Opcode 0x0f 0x2c. */ -FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); +FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT /** Opcode 0x0f 0x2d. */ FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd); /** Opcode 0x0f 0x2e. */ -FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); +FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT /** Opcode 0x0f 0x2f. */ FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd); @@ -1591,7 +1769,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev); IEM_MC_BEGIN(0, 2); \ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ IEM_MC_LOCAL(uint16_t, u16Tmp); \ - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \ + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \ a_Cnd { \ IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \ @@ -1604,7 +1782,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev); IEM_MC_BEGIN(0, 2); \ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ IEM_MC_LOCAL(uint32_t, u32Tmp); \ - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \ + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \ a_Cnd { \ IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \ @@ -1619,7 +1797,7 @@ FNIEMOP_STUB(iemOp_movnti_Gv_Ev); IEM_MC_BEGIN(0, 2); \ IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ IEM_MC_LOCAL(uint64_t, u64Tmp); \ - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \ + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \ a_Cnd { \ IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \ @@ -1780,9 +1958,9 @@ FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd); /** Opcode 0x0f 0x57. */ FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd); /** Opcode 0x0f 0x58. */ -FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); +FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT /** Opcode 0x0f 0x59. */ -FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd); +FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT /** Opcode 0x0f 0x5a. */ FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd); /** Opcode 0x0f 0x5b. */ @@ -1795,12 +1973,140 @@ FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd); FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd); /** Opcode 0x0f 0x5f. */ FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd); + + +/** + * Common worker for SSE2 and MMX instructions on the forms: + * pxxxx xmm1, xmm2/mem128 + * pxxxx mm1, mm2/mem32 + * + * The 2nd operand is the first half of a register, which in the memory case + * means a 32-bit memory access for MMX and 128-bit aligned 64-bit or 128-bit + * memory accessed for MMX. + * + * Exceptions type 4. 
+ */ +FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_ARG(uint64_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_LOCAL(uint64_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + if (!pImpl->pfnU64) + return IEMOP_RAISE_INVALID_OPCODE(); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */ + /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint32_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_LOCAL(uint32_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint32_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0x60. */ -FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpcklbw"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw); +} + + /** Opcode 0x0f 0x61. */ -FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. 
*/ + return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd); +} + + /** Opcode 0x0f 0x62. */ -FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpckldq"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq); +} + + /** Opcode 0x0f 0x63. */ FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq); /** Opcode 0x0f 0x64. */ @@ -1811,24 +2117,481 @@ FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq); FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq); /** Opcode 0x0f 0x67. */ FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq); + + +/** + * Common worker for SSE2 and MMX instructions on the forms: + * pxxxx xmm1, xmm2/mem128 + * pxxxx mm1, mm2/mem64 + * + * The 2nd operand is the second half of a register, which in the memory case + * means a 64-bit memory access for MMX, and for MMX a 128-bit aligned access + * where it may read the full 128 bits or only the upper 64 bits. + * + * Exceptions type 4. + */ +FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_ARG(uint128_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_LOCAL(uint128_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */ + + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + if (!pImpl->pfnU64) + return IEMOP_RAISE_INVALID_OPCODE(); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */ + /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint64_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. 
+ */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_LOCAL(uint64_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0x68. */ -FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpckhbw"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw); +} + + /** Opcode 0x0f 0x69. */ -FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpckhwd"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd); +} + + /** Opcode 0x0f 0x6a. */ -FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpckhdq"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq); +} + /** Opcode 0x0f 0x6b. */ FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq); + + /** Opcode 0x0f 0x6c. */ -FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpcklqdq"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq); +} + + /** Opcode 0x0f 0x6d. */ -FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq); +FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq) +{ + IEMOP_MNEMONIC("punpckhqdq"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq); +} + + /** Opcode 0x0f 0x6e. 
*/ -FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey); +FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + IEMOP_MNEMONIC("movd/q Wd/q,Ed/q"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* XMM, greg*/ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* XMM, [mem] */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + IEMOP_MNEMONIC("movq/d Pd/q,Ed/q"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* MMX, greg */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_LOCAL(uint64_t, u64Tmp); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + else + IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* MMX, [mem] */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0x6f. */ -FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq); -/** Opcode 0x0f 0x70. 
*/ -FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib); +FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq) +{ + bool fAligned = false; + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE aligned */ + fAligned = true; + case IEM_OP_PRF_REPZ: /* SSE unaligned */ + if (fAligned) + IEMOP_MNEMONIC("movdqa Vdq,Wdq"); + else + IEMOP_MNEMONIC("movdqu Vdq,Wdq"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint128_t, u128Tmp); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_XREG_U128(u128Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(uint128_t, u128Tmp); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + if (fAligned) + IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + else + IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + IEMOP_MNEMONIC("movq Pq,Qq"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */ + /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK); + IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + +/** Opcode 0x0f 0x70. The immediate here is evil! 
*/ +FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + case IEM_OP_PRF_REPNZ: /* SSE */ + case IEM_OP_PRF_REPZ: /* SSE */ + { + PFNIEMAIMPLMEDIAPSHUF pfnAImpl; + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: + IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib"); + pfnAImpl = iemAImpl_pshufd; + break; + case IEM_OP_PRF_REPNZ: + IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib"); + pfnAImpl = iemAImpl_pshuflw; + break; + case IEM_OP_PRF_REPZ: + IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib"); + pfnAImpl = iemAImpl_pshufhw; + break; + IEM_NOT_REACHED_DEFAULT_CASE_RET(); + } + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + + IEM_MC_BEGIN(3, 0); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_ARG(uint128_t const *, pSrc, 1); + IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(3, 2); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_LOCAL(uint128_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); + IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + + IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + } + + case 0: /* MMX Extension */ + IEMOP_MNEMONIC("pshufw Pq,Qq,Ib"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + + IEM_MC_BEGIN(3, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint64_t const *, pSrc, 1); + IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT(); + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK); + IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. 
+ */ + IEM_MC_BEGIN(3, 2); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_LOCAL(uint64_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); + IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT(); + + IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + /** Opcode 0x0f 0x71 11/2. */ FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm); @@ -1947,7 +2710,7 @@ FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm); FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm); /** Opcode 0x66 0x0f 0x73 11/3. */ -FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); +FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT /** Opcode 0x0f 0x73 11/6. */ FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm); @@ -1956,7 +2719,7 @@ FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm); FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm); /** Opcode 0x66 0x0f 0x73 11/7. */ -FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); +FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT /** Opcode 0x0f 0x73. */ @@ -2000,12 +2763,133 @@ FNIEMOP_DEF(iemOp_Grp14) } +/** + * Common worker for SSE2 and MMX instructions on the forms: + * pxxx mm1, mm2/mem64 + * pxxx xmm1, xmm2/mem128 + * + * Proper alignment of the 128-bit operand is enforced. + * Exceptions type 4. SSE2 and MMX cpuid checks. + */ +FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_ARG(uint128_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint128_t *, pDst, 0); + IEM_MC_LOCAL(uint128_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + + IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? 
*/ + /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint64_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(2, 2); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_LOCAL(uint64_t, uSrc); + IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); + + IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0x74. */ -FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq); +FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq) +{ + IEMOP_MNEMONIC("pcmpeqb"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb); +} + + /** Opcode 0x0f 0x75. */ -FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq); +FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq) +{ + IEMOP_MNEMONIC("pcmpeqw"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw); +} + + /** Opcode 0x0f 0x76. */ -FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq); +FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq) +{ + IEMOP_MNEMONIC("pcmpeqd"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd); +} + + /** Opcode 0x0f 0x77. */ FNIEMOP_STUB(iemOp_emms); /** Opcode 0x0f 0x78. */ @@ -2016,10 +2900,211 @@ FNIEMOP_UD_STUB(iemOp_vmwrite); FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps); /** Opcode 0x0f 0x7d. */ FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps); + + /** Opcode 0x0f 0x7e. 
*/ -FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq); +FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + IEMOP_MNEMONIC("movd/q Ed/q,Wd/q"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* greg, XMM */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* [mem], XMM */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + IEMOP_MNEMONIC("movq/d Ed/q,Pd/q"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* greg, MMX */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* [mem], MMX */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W) + { + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp); + } + else + { + IEM_MC_LOCAL(uint32_t, u32Tmp); + IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp); + } + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0x7f. 
*/ -FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq); +FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq) +{ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + bool fAligned = false; + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE aligned */ + fAligned = true; + case IEM_OP_PRF_REPZ: /* SSE unaligned */ + if (fAligned) + IEMOP_MNEMONIC("movdqa Wdq,Vdq"); + else + IEMOP_MNEMONIC("movdqu Wdq,Vdq"); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint128_t, u128Tmp); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_STORE_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u128Tmp); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(uint128_t, u128Tmp); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + if (fAligned) + IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp); + else + IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + case 0: /* MMX */ + IEMOP_MNEMONIC("movq Qq,Pq"); + + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register, register. + */ + /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */ + /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * Register, memory. + */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(uint64_t, u64Tmp); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); + IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + /** Opcode 0x0f 0x80. 
*/ @@ -2609,7 +3694,7 @@ FNIEMOP_DEF(iemOp_seto_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -2649,7 +3734,7 @@ FNIEMOP_DEF(iemOp_setno_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -2689,7 +3774,7 @@ FNIEMOP_DEF(iemOp_setc_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -2729,7 +3814,7 @@ FNIEMOP_DEF(iemOp_setnc_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -2769,7 +3854,7 @@ FNIEMOP_DEF(iemOp_sete_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -2809,7 +3894,7 @@ FNIEMOP_DEF(iemOp_setne_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -2849,7 +3934,7 @@ FNIEMOP_DEF(iemOp_setbe_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -2889,7 +3974,7 @@ FNIEMOP_DEF(iemOp_setnbe_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -2929,7 +4014,7 @@ FNIEMOP_DEF(iemOp_sets_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -2969,7 +4054,7 @@ FNIEMOP_DEF(iemOp_setns_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -3009,7 +4094,7 @@ FNIEMOP_DEF(iemOp_setp_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + 
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -3049,7 +4134,7 @@ FNIEMOP_DEF(iemOp_setnp_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -3089,7 +4174,7 @@ FNIEMOP_DEF(iemOp_setl_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -3129,7 +4214,7 @@ FNIEMOP_DEF(iemOp_setnl_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -3169,7 +4254,7 @@ FNIEMOP_DEF(iemOp_setle_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1); } IEM_MC_ELSE() { @@ -3209,7 +4294,7 @@ FNIEMOP_DEF(iemOp_setnle_Eb) /* memory target */ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0); } IEM_MC_ELSE() { @@ -3247,7 +4332,7 @@ FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg) IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg); - IEM_MC_PUSH_U32(u32Value); + IEM_MC_PUSH_U32_SREG(u32Value); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); break; @@ -3336,6 +4421,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -3383,7 +4469,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int16_t, i16AddrAdj); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_ASSIGN(i16AddrAdj, u16Src); IEM_MC_AND_ARG_U16(u16Src, 0x0f); @@ -3412,7 +4498,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int32_t, i32AddrAdj); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_ASSIGN(i32AddrAdj, u32Src); IEM_MC_AND_ARG_U32(u32Src, 0x1f); @@ -3441,7 +4527,7 @@ FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int64_t, i64AddrAdj); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + 
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_ASSIGN(i64AddrAdj, u64Src); IEM_MC_AND_ARG_U64(u64Src, 0x3f); @@ -3520,6 +4606,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -3557,7 +4644,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -3579,7 +4666,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -3601,7 +4688,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -3666,6 +4753,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -3704,7 +4792,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_FETCH_EFLAGS(EFlags); @@ -3725,7 +4813,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_FETCH_EFLAGS(EFlags); @@ -3746,7 +4834,7 @@ FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_FETCH_EFLAGS(EFlags); @@ -3832,15 +4920,16 @@ 
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL) FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm) { IEMOP_MNEMONIC("fxsave m512"); - IEMOP_HLP_NO_LOCK_PREFIX(); if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR)) - return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + return IEMOP_RAISE_INVALID_OPCODE(); IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEff, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize); IEM_MC_END(); return VINF_SUCCESS; @@ -3851,15 +4940,16 @@ FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm) FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm) { IEMOP_MNEMONIC("fxrstor m512"); - IEMOP_HLP_NO_LOCK_PREFIX(); if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR)) - return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + return IEMOP_RAISE_INVALID_OPCODE(); IEM_MC_BEGIN(3, 1); - IEM_MC_ARG_CONST(uint8_t, iEffSeg,/*=*/pIemCpu->iEffSeg, 0); + IEM_MC_ARG(uint8_t, iEffSeg, 0); IEM_MC_ARG(RTGCPTR, GCPtrEff, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize); IEM_MC_END(); return VINF_SUCCESS; @@ -3884,14 +4974,63 @@ FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm); /** Opcode 0x0f 0xae mem/7. */ FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm); + /** Opcode 0x0f 0xae 11b/5. */ -FNIEMOP_STUB_1(iemOp_Grp15_lfence, uint8_t, bRm); +FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm) +{ + IEMOP_MNEMONIC("lfence"); + IEMOP_HLP_NO_LOCK_PREFIX(); + if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2)) + return IEMOP_RAISE_INVALID_OPCODE(); + + IEM_MC_BEGIN(0, 0); + if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2)) + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence); + else + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + return VINF_SUCCESS; +} + /** Opcode 0x0f 0xae 11b/6. */ -FNIEMOP_STUB_1(iemOp_Grp15_mfence, uint8_t, bRm); +FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm) +{ + IEMOP_MNEMONIC("mfence"); + IEMOP_HLP_NO_LOCK_PREFIX(); + if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2)) + return IEMOP_RAISE_INVALID_OPCODE(); + + IEM_MC_BEGIN(0, 0); + if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2)) + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence); + else + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + return VINF_SUCCESS; +} + /** Opcode 0x0f 0xae 11b/7. 
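/*
 * An assumption-laden sketch, not the actual iemAImpl_* workers: LFENCE and
 * MFENCE (and SFENCE below) are SSE2 instructions, hence the guest CPUID check
 * and the separate host CPUID check above.  A common substitute for a full
 * memory fence on hosts without SSE2 is a LOCKed read-modify-write of a dummy
 * location; whether iemAImpl_alt_mem_fence does exactly that is an assumption
 * here.  GCC/Clang inline assembly on x86 is assumed.
 */
static void MemFenceSse2(void)
{
    __asm__ __volatile__("mfence" ::: "memory");
}

static void MemFenceFallback(void)
{
    int iDummy = 0;
    __asm__ __volatile__("lock; orl $0, %0" : "+m" (iDummy) : : "memory");
}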
*/ -FNIEMOP_STUB_1(iemOp_Grp15_sfence, uint8_t, bRm); +FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm) +{ + IEMOP_MNEMONIC("sfence"); + IEMOP_HLP_NO_LOCK_PREFIX(); + if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2)) + return IEMOP_RAISE_INVALID_OPCODE(); + + IEM_MC_BEGIN(0, 0); + if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2)) + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence); + else + IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + return VINF_SUCCESS; +} + /** Opcode 0xf3 0x0f 0xae 11b/0. */ FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm); @@ -4012,7 +5151,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(uint8_t, u8Al); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING(); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -4134,7 +5273,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(uint16_t, u16Ax); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING(); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -4162,7 +5301,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(uint32_t, u32Eax); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING(); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); @@ -4194,7 +5333,7 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(uint64_t, u64Rax); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING(); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX); @@ -4227,14 +5366,9 @@ FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv) } -FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg) +FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm) { - uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); - IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ - - /* The source cannot be a register. 
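/*
 * An illustrative sketch of the CMPXCHG semantics decoded above, not the
 * iemAImpl_cmpxchg_* workers: the accumulator is compared with the
 * destination; on a match ZF is set and the source operand is stored,
 * otherwise ZF is cleared and the destination value is loaded back into the
 * accumulator.  32-bit case shown.
 */
#include <stdint.h>
#include <stdbool.h>

static bool CmpXchgU32(uint32_t *pu32Dst, uint32_t *pu32Eax, uint32_t u32Src)
{
    if (*pu32Dst == *pu32Eax)
    {
        *pu32Dst = u32Src;
        return true;        /* ZF = 1 */
    }
    *pu32Eax = *pu32Dst;
    return false;           /* ZF = 0 */
}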
*/ - if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) - return IEMOP_RAISE_INVALID_OPCODE(); + Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */ uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg; switch (pIemCpu->enmEffOpSize) @@ -4247,7 +5381,8 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg) IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4); IEM_MC_LOCAL(RTGCPTR, GCPtrEff); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff); IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2); IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); @@ -4262,7 +5397,8 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg) IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4); IEM_MC_LOCAL(RTGCPTR, GCPtrEff); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff); IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4); IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); @@ -4277,8 +5413,12 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg) IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4); IEM_MC_LOCAL(RTGCPTR, GCPtrEff); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm); - IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. 
*/ + IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff); + else + IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff); IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8); IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); IEM_MC_END(); @@ -4293,7 +5433,10 @@ FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg) FNIEMOP_DEF(iemOp_lss_Gv_Mp) { IEMOP_MNEMONIC("lss Gv,Mp"); - return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + return IEMOP_RAISE_INVALID_OPCODE(); + return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm); } @@ -4309,7 +5452,10 @@ FNIEMOP_DEF(iemOp_btr_Ev_Gv) FNIEMOP_DEF(iemOp_lfs_Gv_Mp) { IEMOP_MNEMONIC("lfs Gv,Mp"); - return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + return IEMOP_RAISE_INVALID_OPCODE(); + return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm); } @@ -4317,7 +5463,10 @@ FNIEMOP_DEF(iemOp_lfs_Gv_Mp) FNIEMOP_DEF(iemOp_lgs_Gv_Mp) { IEMOP_MNEMONIC("lgs Gv,Mp"); - return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + return IEMOP_RAISE_INVALID_OPCODE(); + return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm); } @@ -4377,7 +5526,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value); IEM_MC_ADVANCE_RIP(); @@ -4388,7 +5537,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value); IEM_MC_ADVANCE_RIP(); @@ -4399,7 +5548,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); IEM_MC_ADVANCE_RIP(); @@ -4457,7 +5606,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Ew) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value); IEM_MC_ADVANCE_RIP(); @@ -4468,7 +5617,7 @@ FNIEMOP_DEF(iemOp_movzx_Gv_Ew) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, 
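/*
 * A layout sketch only, not IEM code: LSS/LFS/LGS (and LDS/LES) read a far
 * pointer from memory with the offset first and the 16-bit selector
 * immediately after it, which is why the code above fetches the selector at
 * displacement 2, 4 or 8 depending on the operand size.  The 64-bit case is
 * additionally hedged with a 32-bit sign-extending fetch on AMD guests, per
 * the @todo note quoted above.  64-bit layout shown.
 */
#include <stdint.h>
#include <string.h>

static void ReadFarPtr64(const uint8_t *pbMem, uint64_t *puOffset, uint16_t *puSel)
{
    memcpy(puOffset, pbMem, sizeof(*puOffset));     /* bytes 0..7: offset   */
    memcpy(puSel, pbMem + 8, sizeof(*puSel));       /* bytes 8..9: selector */
}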
pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); IEM_MC_ADVANCE_RIP(); @@ -4540,6 +5689,7 @@ FNIEMOP_DEF(iemOp_Grp8) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -4584,16 +5734,16 @@ FNIEMOP_DEF(iemOp_Grp8) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit); IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f); IEM_MC_FETCH_EFLAGS(EFlags); - IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); + IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); else IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags); - IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW); + IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess); IEM_MC_COMMIT_EFLAGS(EFlags); IEM_MC_ADVANCE_RIP(); @@ -4607,16 +5757,16 @@ FNIEMOP_DEF(iemOp_Grp8) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit); IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f); IEM_MC_FETCH_EFLAGS(EFlags); - IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); + IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); else IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags); - IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW); + IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess); IEM_MC_COMMIT_EFLAGS(EFlags); IEM_MC_ADVANCE_RIP(); @@ -4630,16 +5780,16 @@ FNIEMOP_DEF(iemOp_Grp8) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit); IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f); IEM_MC_FETCH_EFLAGS(EFlags); - IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0); + IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); else IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags); - IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW); + IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess); IEM_MC_COMMIT_EFLAGS(EFlags); IEM_MC_ADVANCE_RIP(); @@ -4735,7 +5885,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value); IEM_MC_ADVANCE_RIP(); @@ -4746,7 +5896,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - 
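/*
 * An illustrative sketch of the Grp8 immediate bit-test family decoded above,
 * not the iemAImpl_bt* workers: the imm8 bit offset is masked to the operand
 * width (0x0f/0x1f/0x3f), the selected bit goes to CF, and BTS/BTR/BTC
 * additionally set, clear or toggle it.  BTS shown for the 32-bit case.
 */
#include <stdint.h>
#include <stdbool.h>

static bool BtsU32(uint32_t *pu32Dst, uint8_t u8BitNo)
{
    uint32_t const fMask   = UINT32_C(1) << (u8BitNo & 0x1f);  /* offset masked to 0..31 */
    bool const     fOldBit = (*pu32Dst & fMask) != 0;          /* old value goes to CF   */
    *pu32Dst |= fMask;                                         /* BTS: set the bit       */
    return fOldBit;
}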
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value); IEM_MC_ADVANCE_RIP(); @@ -4757,7 +5907,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); IEM_MC_ADVANCE_RIP(); @@ -4815,7 +5965,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Ew) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value); IEM_MC_ADVANCE_RIP(); @@ -4826,7 +5976,7 @@ FNIEMOP_DEF(iemOp_movsx_Gv_Ew) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); IEM_MC_ADVANCE_RIP(); @@ -4875,7 +6025,7 @@ FNIEMOP_DEF(iemOp_xadd_Eb_Gb) IEM_MC_LOCAL(uint8_t, u8RegCopy); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy); @@ -4937,6 +6087,8 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -4974,7 +6126,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv) IEM_MC_LOCAL(uint16_t, u16RegCopy); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy); @@ -4999,7 +6151,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv) IEM_MC_LOCAL(uint32_t, u32RegCopy); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy); @@ -5024,7 +6176,7 @@ FNIEMOP_DEF(iemOp_xadd_Ev_Gv) IEM_MC_LOCAL(uint64_t, u64RegCopy); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); 
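/*
 * An illustrative sketch of the XADD exchange-and-add performed by the workers
 * invoked above: the destination receives dst + src while the source register
 * receives the original destination value; EFLAGS are updated as for ADD and
 * are not modelled here.
 */
#include <stdint.h>

static void XAddU32(uint32_t *pu32Dst, uint32_t *pu32Reg)
{
    uint32_t const u32OldDst = *pu32Dst;
    *pu32Dst = u32OldDst + *pu32Reg;    /* destination gets the sum        */
    *pu32Reg = u32OldDst;               /* register gets the old dst value */
}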
IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy); @@ -5076,7 +6228,7 @@ FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm) IEM_MC_LOCAL(RTUINT64U, u64EbxEcx); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING(); IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -5217,7 +6369,10 @@ FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg) FNIEMOP_DEF(iemOp_bswap_rAX_r8) { IEMOP_MNEMONIC("bswap rAX/r8"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexReg); + /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X + prefix. REX.B is the correct prefix it appears. For a parallel + case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */ + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB); } @@ -5225,7 +6380,7 @@ FNIEMOP_DEF(iemOp_bswap_rAX_r8) FNIEMOP_DEF(iemOp_bswap_rCX_r9) { IEMOP_MNEMONIC("bswap rCX/r9"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB); } @@ -5233,7 +6388,7 @@ FNIEMOP_DEF(iemOp_bswap_rCX_r9) FNIEMOP_DEF(iemOp_bswap_rDX_r10) { IEMOP_MNEMONIC("bswap rDX/r9"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB); } @@ -5241,7 +6396,7 @@ FNIEMOP_DEF(iemOp_bswap_rDX_r10) FNIEMOP_DEF(iemOp_bswap_rBX_r11) { IEMOP_MNEMONIC("bswap rBX/r9"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB); } @@ -5249,7 +6404,7 @@ FNIEMOP_DEF(iemOp_bswap_rBX_r11) FNIEMOP_DEF(iemOp_bswap_rSP_r12) { IEMOP_MNEMONIC("bswap rSP/r12"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB); } @@ -5257,7 +6412,7 @@ FNIEMOP_DEF(iemOp_bswap_rSP_r12) FNIEMOP_DEF(iemOp_bswap_rBP_r13) { IEMOP_MNEMONIC("bswap rBP/r13"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB); } @@ -5265,7 +6420,7 @@ FNIEMOP_DEF(iemOp_bswap_rBP_r13) FNIEMOP_DEF(iemOp_bswap_rSI_r14) { IEMOP_MNEMONIC("bswap rSI/r14"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB); } @@ -5273,7 +6428,7 @@ FNIEMOP_DEF(iemOp_bswap_rSI_r14) FNIEMOP_DEF(iemOp_bswap_rDI_r15) { IEMOP_MNEMONIC("bswap rDI/r15"); - return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexReg); + return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB); } @@ -5292,8 +6447,57 @@ FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq); FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq); /** Opcode 0x0f 0xd6. */ FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq); + + /** Opcode 0x0f 0xd7. */ -FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq); +FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq) +{ + /* Docs says register only. */ + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. 
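/*
 * A sketch relating to the bswap fixes above, not IEM code: the one-byte BSWAP
 * encodings (0x0f 0xc8..0xcf) carry the register number in the opcode byte
 * itself, so it is REX.B, tracked in uRexB, that extends them to r8-r15; that
 * is what the uRexReg to uRexB changes correct.  The operation itself simply
 * reverses the byte order:
 */
#include <stdint.h>

static uint32_t BSwapU32(uint32_t u32)
{
    return (u32 >> 24)
         | ((u32 >> 8) & UINT32_C(0x0000ff00))
         | ((u32 << 8) & UINT32_C(0x00ff0000))
         | (u32 << 24);
}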
*/ + return IEMOP_RAISE_INVALID_OPCODE(); + + /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */ + /** @todo testcase: Check that the instruction implicitly clears the high + * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256 + * and opcode modifications are made to work with the whole width (not + * just 128). */ + switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) + { + case IEM_OP_PRF_SIZE_OP: /* SSE */ + IEMOP_MNEMONIC("pmovmskb Gd,Nq"); + IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint128_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); + IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); + IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + return VINF_SUCCESS; + + case 0: /* MMX */ + IEMOP_MNEMONIC("pmovmskb Gd,Udq"); + IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS); + IEM_MC_BEGIN(2, 0); + IEM_MC_ARG(uint64_t *, pDst, 0); + IEM_MC_ARG(uint64_t const *, pSrc, 1); + IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT(); + IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK); + IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + return VINF_SUCCESS; + + default: + return IEMOP_RAISE_INVALID_OPCODE(); + } +} + + /** Opcode 0x0f 0xd8. */ FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq); /** Opcode 0x0f 0xd9. */ @@ -5340,8 +6544,16 @@ FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq); FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq); /** Opcode 0x0f 0xee. */ FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq); + + /** Opcode 0x0f 0xef. */ -FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq); +FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq) +{ + IEMOP_MNEMONIC("pxor"); + return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor); +} + + /** Opcode 0x0f 0xf0. */ FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq); /** Opcode 0x0f 0xf1. */ @@ -5359,7 +6571,7 @@ FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq); /** Opcode 0x0f 0xf7. */ FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq); /** Opcode 0x0f 0xf8. */ -FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); +FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT /** Opcode 0x0f 0xf9. */ FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq); /** Opcode 0x0f 0xfa. */ @@ -5966,6 +7178,7 @@ FNIEMOP_DEF(iemOp_and_eAX_Iz) /** Opcode 0x26. */ FNIEMOP_DEF(iemOp_seg_ES) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES; pIemCpu->iEffSeg = X86_SREG_ES; @@ -5975,7 +7188,14 @@ FNIEMOP_DEF(iemOp_seg_ES) /** Opcode 0x27. */ -FNIEMOP_STUB(iemOp_daa); +FNIEMOP_DEF(iemOp_daa) +{ + IEMOP_MNEMONIC("daa AL"); + IEMOP_HLP_NO_64BIT(); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_daa); +} /** Opcode 0x28. */ @@ -6029,6 +7249,7 @@ FNIEMOP_DEF(iemOp_sub_eAX_Iz) /** Opcode 0x2e. 
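/*
 * An illustrative sketch of PMOVMSKB as dispatched above to
 * iemAImpl_pmovmskb_u64/u128 (whose bodies live elsewhere): the most
 * significant bit of every byte in the MMX/XMM source is gathered into the low
 * bits of the destination general register and the remaining bits are zero.
 * MMX form shown.
 */
#include <stdint.h>

static uint64_t PMovMskBU64(uint64_t u64Src)
{
    uint64_t fMask = 0;
    for (unsigned iByte = 0; iByte < 8; iByte++)
        fMask |= ((u64Src >> (iByte * 8 + 7)) & 1) << iByte;  /* MSB of byte i to bit i */
    return fMask;
}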
*/ FNIEMOP_DEF(iemOp_seg_CS) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS; pIemCpu->iEffSeg = X86_SREG_CS; @@ -6038,7 +7259,14 @@ FNIEMOP_DEF(iemOp_seg_CS) /** Opcode 0x2f. */ -FNIEMOP_STUB(iemOp_das); +FNIEMOP_DEF(iemOp_das) +{ + IEMOP_MNEMONIC("das AL"); + IEMOP_HLP_NO_64BIT(); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF); + return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_das); +} /** Opcode 0x30. */ @@ -6098,6 +7326,7 @@ FNIEMOP_DEF(iemOp_xor_eAX_Iz) /** Opcode 0x36. */ FNIEMOP_DEF(iemOp_seg_SS) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS; pIemCpu->iEffSeg = X86_SREG_SS; @@ -6163,6 +7392,7 @@ FNIEMOP_DEF(iemOp_cmp_eAX_Iz) /** Opcode 0x3e. */ FNIEMOP_DEF(iemOp_seg_DS) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS; pIemCpu->iEffSeg = X86_SREG_DS; @@ -6200,6 +7430,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg) IEM_MC_REF_GREG_U32(pu32Dst, iReg); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -6227,6 +7458,7 @@ FNIEMOP_DEF(iemOp_inc_eAX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX; uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); @@ -6246,6 +7478,7 @@ FNIEMOP_DEF(iemOp_inc_eCX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B; pIemCpu->uRexB = 1 << 3; @@ -6266,6 +7499,7 @@ FNIEMOP_DEF(iemOp_inc_eDX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X; pIemCpu->uRexIndex = 1 << 3; @@ -6287,6 +7521,7 @@ FNIEMOP_DEF(iemOp_inc_eBX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X; pIemCpu->uRexB = 1 << 3; pIemCpu->uRexIndex = 1 << 3; @@ -6308,6 +7543,7 @@ FNIEMOP_DEF(iemOp_inc_eSP) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R; pIemCpu->uRexReg = 1 << 3; @@ -6328,6 +7564,7 @@ FNIEMOP_DEF(iemOp_inc_eBP) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexB = 1 << 3; @@ -6349,6 +7586,7 @@ FNIEMOP_DEF(iemOp_inc_eSI) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexIndex = 1 << 3; @@ -6370,6 +7608,7 @@ FNIEMOP_DEF(iemOp_inc_eDI) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexB = 1 << 3; @@ -6392,6 +7631,7 @@ FNIEMOP_DEF(iemOp_dec_eAX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W; iemRecalEffOpSize(pIemCpu); @@ 
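/*
 * An illustrative sketch of the DAA adjustment deferred to iemCImpl_daa above
 * (DAS is the subtractive counterpart), following the architectural pseudo
 * code; OF is architecturally undefined and left alone, and the SF/ZF/PF
 * updates are omitted for brevity.
 */
#include <stdint.h>
#include <stdbool.h>

static void Daa(uint8_t *pbAl, bool *pfCf, bool *pfAf)
{
    uint8_t const bOldAl = *pbAl;
    bool const    fOldCf = *pfCf;
    if ((bOldAl & 0x0f) > 9 || *pfAf)
    {
        *pbAl = (uint8_t)(bOldAl + 6);      /* adjust the low nibble */
        *pfAf = true;
    }
    else
        *pfAf = false;
    if (bOldAl > 0x99 || fOldCf)
    {
        *pbAl = (uint8_t)(*pbAl + 0x60);    /* adjust the high nibble */
        *pfCf = true;
    }
    else
        *pfCf = false;
}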
-6412,6 +7652,7 @@ FNIEMOP_DEF(iemOp_dec_eCX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexB = 1 << 3; iemRecalEffOpSize(pIemCpu); @@ -6433,6 +7674,7 @@ FNIEMOP_DEF(iemOp_dec_eDX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexIndex = 1 << 3; iemRecalEffOpSize(pIemCpu); @@ -6454,6 +7696,7 @@ FNIEMOP_DEF(iemOp_dec_eBX) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexB = 1 << 3; pIemCpu->uRexIndex = 1 << 3; @@ -6476,6 +7719,7 @@ FNIEMOP_DEF(iemOp_dec_eSP) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexReg = 1 << 3; iemRecalEffOpSize(pIemCpu); @@ -6497,6 +7741,7 @@ FNIEMOP_DEF(iemOp_dec_eBP) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexB = 1 << 3; @@ -6519,6 +7764,7 @@ FNIEMOP_DEF(iemOp_dec_eSI) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexIndex = 1 << 3; @@ -6541,6 +7787,7 @@ FNIEMOP_DEF(iemOp_dec_eDI) */ if (pIemCpu->enmCpuMode == IEMMODE_64BIT) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw"); pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; pIemCpu->uRexReg = 1 << 3; pIemCpu->uRexB = 1 << 3; @@ -6680,10 +7927,6 @@ FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg) pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT; } -/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU - * handle it, for that matter (Intel pseudo code hints that the popped - * value is incremented by the stack item size.) Test it, both encodings - * and all three register sizes. */ switch (pIemCpu->enmEffOpSize) { case IEMMODE_16BIT: @@ -6700,6 +7943,7 @@ FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg) IEM_MC_LOCAL(uint32_t, *pu32Dst); IEM_MC_REF_GREG_U32(pu32Dst, iReg); IEM_MC_POP_U32(pu32Dst); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/ IEM_MC_ADVANCE_RIP(); IEM_MC_END(); break; @@ -6754,7 +7998,48 @@ FNIEMOP_DEF(iemOp_pop_eBX) FNIEMOP_DEF(iemOp_pop_eSP) { IEMOP_MNEMONIC("pop rSP"); - return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP); + if (pIemCpu->enmCpuMode == IEMMODE_64BIT) + { + if (pIemCpu->uRexB) + return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP); + pIemCpu->enmDefOpSize = IEMMODE_64BIT; + pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT; + } + + IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP, + DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG); + /** @todo add testcase for this instruction. 
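/*
 * A sketch of what the rex.* cases above accumulate, not the decoder itself:
 * in 64-bit mode opcodes 0x40-0x4f act as REX prefixes of the form 0100WRXB,
 * where W requests 64-bit operand size, R extends ModRM.reg, X extends
 * SIB.index and B extends ModRM.rm / SIB.base / an opcode-embedded register.
 * A REX prefix only counts when it immediately precedes the opcode, which
 * appears to be what the IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE additions on
 * the other prefixes enforce.  Names below are illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct REXSTATE
{
    bool    fSizeRexW;
    uint8_t uRexReg;    /* 0 or 8, OR'ed into ModRM.reg */
    uint8_t uRexIndex;  /* 0 or 8, OR'ed into SIB.index */
    uint8_t uRexB;      /* 0 or 8, OR'ed into ModRM.rm  */
} REXSTATE;

static void DecodeRex(REXSTATE *pState, uint8_t bRex)
{
    pState->fSizeRexW = (bRex & 0x08) != 0;
    pState->uRexReg   = (bRex & 0x04) ? 8 : 0;
    pState->uRexIndex = (bRex & 0x02) ? 8 : 0;
    pState->uRexB     = (bRex & 0x01) ? 8 : 0;
}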
*/ + switch (pIemCpu->enmEffOpSize) + { + case IEMMODE_16BIT: + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint16_t, u16Dst); + IEM_MC_POP_U16(&u16Dst); /** @todo not correct MC, fix later. */ + IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + break; + + case IEMMODE_32BIT: + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint32_t, u32Dst); + IEM_MC_POP_U32(&u32Dst); + IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + break; + + case IEMMODE_64BIT: + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint64_t, u64Dst); + IEM_MC_POP_U64(&u64Dst); + IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + break; + } + + return VINF_SUCCESS; } @@ -6808,13 +8093,105 @@ FNIEMOP_DEF(iemOp_popa) /** Opcode 0x62. */ FNIEMOP_STUB(iemOp_bound_Gv_Ma); -/** Opcode 0x63. */ -FNIEMOP_STUB(iemOp_arpl_Ew_Gw); + + +/** Opcode 0x63 - non-64-bit modes. */ +FNIEMOP_DEF(iemOp_arpl_Ew_Gw) +{ + IEMOP_MNEMONIC("arpl Ew,Gw"); + IEMOP_HLP_NO_REAL_OR_V86_MODE(); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* Register */ + IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS); + IEM_MC_BEGIN(3, 0); + IEM_MC_ARG(uint16_t *, pu16Dst, 0); + IEM_MC_ARG(uint16_t, u16Src, 1); + IEM_MC_ARG(uint32_t *, pEFlags, 2); + + IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK)); + IEM_MC_REF_EFLAGS(pEFlags); + IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags); + + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* Memory */ + IEM_MC_BEGIN(3, 2); + IEM_MC_ARG(uint16_t *, pu16Dst, 0); + IEM_MC_ARG(uint16_t, u16Src, 1); + IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); + + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); + IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS); + IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); + IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); + IEM_MC_FETCH_EFLAGS(EFlags); + IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags); + + IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW); + IEM_MC_COMMIT_EFLAGS(EFlags); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; + +} + + +/** Opcode 0x63. + * @note This is a weird one. It works like a regular move instruction if + * REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11). + * @todo This definitely needs a testcase to verify the odd cases. */ +FNIEMOP_DEF(iemOp_movsxd_Gv_Ev) +{ + Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */ + + IEMOP_MNEMONIC("movsxd Gv,Ev"); + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + + if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + /* + * Register to register. + */ + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_BEGIN(0, 1); + IEM_MC_LOCAL(uint64_t, u64Value); + IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); + IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + else + { + /* + * We're loading a register from memory. 
+ */ + IEM_MC_BEGIN(0, 2); + IEM_MC_LOCAL(uint64_t, u64Value); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); + IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); + IEM_MC_ADVANCE_RIP(); + IEM_MC_END(); + } + return VINF_SUCCESS; +} /** Opcode 0x64. */ FNIEMOP_DEF(iemOp_seg_FS) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS; pIemCpu->iEffSeg = X86_SREG_FS; @@ -6826,6 +8203,7 @@ FNIEMOP_DEF(iemOp_seg_FS) /** Opcode 0x65. */ FNIEMOP_DEF(iemOp_seg_GS) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs"); pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS; pIemCpu->iEffSeg = X86_SREG_GS; @@ -6837,6 +8215,7 @@ FNIEMOP_DEF(iemOp_seg_GS) /** Opcode 0x66. */ FNIEMOP_DEF(iemOp_op_size) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size"); pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP; iemRecalEffOpSize(pIemCpu); @@ -6848,6 +8227,7 @@ FNIEMOP_DEF(iemOp_op_size) /** Opcode 0x67. */ FNIEMOP_DEF(iemOp_addr_size) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size"); pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR; switch (pIemCpu->enmDefAddrMode) { @@ -6949,7 +8329,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz) IEM_MC_LOCAL(uint16_t, u16Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); IEM_MC_ASSIGN(u16Src, u16Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -6998,7 +8378,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz) IEM_MC_LOCAL(uint32_t, u32Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); IEM_MC_ASSIGN(u32Src, u32Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -7047,7 +8427,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz) IEM_MC_LOCAL(uint64_t, u64Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); IEM_MC_ASSIGN(u64Src, u64Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -7135,7 +8515,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib) IEM_MC_LOCAL(uint16_t, u16Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm); IEM_MC_ASSIGN(u16Src, u16Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -7182,7 +8562,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib) IEM_MC_LOCAL(uint32_t, u32Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm); IEM_MC_ASSIGN(u32Src, u32Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -7229,7 +8609,7 @@ FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib) IEM_MC_LOCAL(uint64_t, u64Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm); IEM_MC_ASSIGN(u64Src, u64Imm); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); @@ -7257,9 +8637,9 @@ FNIEMOP_DEF(iemOp_insb_Yb_DX) IEMOP_MNEMONIC("rep ins Yb,DX"); switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16); 
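/*
 * An illustrative sketch of the adjustment done by the iemAImpl_arpl worker
 * referenced above: the requested privilege level (selector bits 1:0) of the
 * destination is raised to that of the source when it is lower, and ZF reports
 * whether an adjustment was made.  In 64-bit mode the same opcode byte is
 * MOVSXD instead, as the new dispatcher further down selects.
 */
#include <stdint.h>
#include <stdbool.h>

static bool Arpl(uint16_t *pu16SelDst, uint16_t u16SelSrc)
{
    if ((*pu16SelDst & 0x3) < (u16SelSrc & 0x3))
    {
        *pu16SelDst = (uint16_t)((*pu16SelDst & 0xfffc) | (u16SelSrc & 0x3));
        return true;    /* ZF = 1: RPL adjusted */
    }
    return false;       /* ZF = 0 */
}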
- case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } } @@ -7268,9 +8648,9 @@ FNIEMOP_DEF(iemOp_insb_Yb_DX) IEMOP_MNEMONIC("ins Yb,DX"); switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } } @@ -7289,9 +8669,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX) case IEMMODE_16BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7299,9 +8679,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX) case IEMMODE_32BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7316,9 +8696,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX) case IEMMODE_16BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7326,9 +8706,9 @@ FNIEMOP_DEF(iemOp_inswd_Yv_DX) case IEMMODE_32BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64); + case IEMMODE_16BIT: return 
IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7347,9 +8727,9 @@ FNIEMOP_DEF(iemOp_outsb_Yb_DX) IEMOP_MNEMONIC("rep out DX,Yb"); switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } } @@ -7358,9 +8738,9 @@ FNIEMOP_DEF(iemOp_outsb_Yb_DX) IEMOP_MNEMONIC("out DX,Yb"); switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } } @@ -7379,9 +8759,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX) case IEMMODE_16BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7389,9 +8769,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX) case IEMMODE_32BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7406,9 +8786,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX) case IEMMODE_16BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: 
return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7416,9 +8796,9 @@ FNIEMOP_DEF(iemOp_outswd_Yv_DX) case IEMMODE_32BIT: switch (pIemCpu->enmEffAddrMode) { - case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg); - case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg); - case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg); + case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false); + case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false); + case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false); IEM_NOT_REACHED_DEFAULT_CASE_RET(); } break; @@ -7771,7 +9151,7 @@ FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); @@ -7836,7 +9216,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); IEM_MC_ASSIGN(u16Src, u16Imm); IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -7869,6 +9249,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz) IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -7890,7 +9271,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); IEM_MC_ASSIGN(u32Src, u32Imm); IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -7944,7 +9325,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Iz) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); IEM_MC_ASSIGN(u64Src, u64Imm); IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -8016,6 +9397,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib) IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -8063,7 +9445,7 @@ 
FNIEMOP_DEF(iemOp_Grp1_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm); IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -8088,7 +9470,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm); IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -8113,7 +9495,7 @@ FNIEMOP_DEF(iemOp_Grp1_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm); IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -8191,7 +9573,7 @@ FNIEMOP_DEF(iemOp_xchg_Eb_Gb) IEM_MC_ARG(uint8_t *, pu8Reg, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg); @@ -8278,7 +9660,7 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv) IEM_MC_ARG(uint16_t *, pu16Reg, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg); @@ -8294,12 +9676,13 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv) IEM_MC_ARG(uint32_t *, pu32Reg, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg); IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -8310,7 +9693,7 @@ FNIEMOP_DEF(iemOp_xchg_Ev_Gv) IEM_MC_ARG(uint64_t *, pu64Reg, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg); @@ -8355,7 +9738,7 @@ FNIEMOP_DEF(iemOp_mov_Eb_Gb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint8_t, u8Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, 
GCPtrEffDst, u8Value); IEM_MC_ADVANCE_RIP(); @@ -8420,7 +9803,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value); IEM_MC_ADVANCE_RIP(); @@ -8431,7 +9814,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value); IEM_MC_ADVANCE_RIP(); @@ -8442,7 +9825,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Gv) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value); IEM_MC_ADVANCE_RIP(); @@ -8482,7 +9865,7 @@ FNIEMOP_DEF(iemOp_mov_Gb_Eb) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint8_t, u8Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value); IEM_MC_ADVANCE_RIP(); @@ -8546,7 +9929,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value); IEM_MC_ADVANCE_RIP(); @@ -8557,7 +9940,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value); IEM_MC_ADVANCE_RIP(); @@ -8568,7 +9951,7 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value); IEM_MC_ADVANCE_RIP(); @@ -8580,6 +9963,17 @@ FNIEMOP_DEF(iemOp_mov_Gv_Ev) } +/** Opcode 0x63. */ +FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev) +{ + if (pIemCpu->enmCpuMode != IEMMODE_64BIT) + return FNIEMOP_CALL(iemOp_arpl_Ew_Gw); + if (pIemCpu->enmEffOpSize != IEMMODE_64BIT) + return FNIEMOP_CALL(iemOp_mov_Gv_Ev); + return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev); +} + + /** Opcode 0x8c. 
*/ FNIEMOP_DEF(iemOp_mov_Ev_Sw) { @@ -8644,7 +10038,7 @@ FNIEMOP_DEF(iemOp_mov_Ev_Sw) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Value); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_SREG_U16(u16Value, iSegReg); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value); IEM_MC_ADVANCE_RIP(); @@ -8663,7 +10057,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M) uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) - return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */ + return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */ switch (pIemCpu->enmEffOpSize) { @@ -8671,7 +10065,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); IEM_MC_LOCAL(uint16_t, u16Cast); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc); IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast); IEM_MC_ADVANCE_RIP(); @@ -8682,7 +10076,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); IEM_MC_LOCAL(uint32_t, u32Cast); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc); IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast); IEM_MC_ADVANCE_RIP(); @@ -8692,7 +10086,7 @@ FNIEMOP_DEF(iemOp_lea_Gv_M) case IEMMODE_64BIT: IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -8748,7 +10142,7 @@ FNIEMOP_DEF(iemOp_mov_Sw_Ev) IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0); IEM_MC_ARG(uint16_t, u16Value, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value); IEM_MC_END(); @@ -8795,7 +10189,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm) uint8_t const offOpcodeSaved = pIemCpu->offOpcode; RTGCPTR GCPtrEff; VBOXSTRICTRC rcStrict; - rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff); + rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff); if (rcStrict != VINF_SUCCESS) return rcStrict; pIemCpu->offOpcode = offOpcodeSaved; @@ -8804,12 +10198,12 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm) uint64_t const RspSaved = pCtx->rsp; switch (pIemCpu->enmEffOpSize) { - case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break; - case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break; - case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break; + case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break; + case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break; + case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break; IEM_NOT_REACHED_DEFAULT_CASE_RET(); } - rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff); + rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff); Assert(rcStrict == VINF_SUCCESS); pCtx->rsp = RspSaved; @@ -8841,7 +10235,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm) 
uint64_t u64Value; rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp); if (rcStrict == VINF_SUCCESS) - rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value); + rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value); break; } @@ -8850,7 +10244,7 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm) if (rcStrict == VINF_SUCCESS) { pCtx->rsp = TmpRsp.u; - iemRegUpdateRip(pIemCpu); + iemRegUpdateRipAndClearRF(pIemCpu); } return rcStrict; @@ -10178,7 +11572,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg) FNIEMOP_DEF(iemOp_mov_AL_Ib) { IEMOP_MNEMONIC("mov AL,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB); } @@ -10186,7 +11580,7 @@ FNIEMOP_DEF(iemOp_mov_AL_Ib) FNIEMOP_DEF(iemOp_CL_Ib) { IEMOP_MNEMONIC("mov CL,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB); } @@ -10194,7 +11588,7 @@ FNIEMOP_DEF(iemOp_CL_Ib) FNIEMOP_DEF(iemOp_DL_Ib) { IEMOP_MNEMONIC("mov DL,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB); } @@ -10202,7 +11596,7 @@ FNIEMOP_DEF(iemOp_DL_Ib) FNIEMOP_DEF(iemOp_BL_Ib) { IEMOP_MNEMONIC("mov BL,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB); } @@ -10210,7 +11604,7 @@ FNIEMOP_DEF(iemOp_BL_Ib) FNIEMOP_DEF(iemOp_mov_AH_Ib) { IEMOP_MNEMONIC("mov AH,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB); } @@ -10218,7 +11612,7 @@ FNIEMOP_DEF(iemOp_mov_AH_Ib) FNIEMOP_DEF(iemOp_CH_Ib) { IEMOP_MNEMONIC("mov CH,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB); } @@ -10226,7 +11620,7 @@ FNIEMOP_DEF(iemOp_CH_Ib) FNIEMOP_DEF(iemOp_DH_Ib) { IEMOP_MNEMONIC("mov DH,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB); } @@ -10234,7 +11628,7 @@ FNIEMOP_DEF(iemOp_DH_Ib) FNIEMOP_DEF(iemOp_BH_Ib) { IEMOP_MNEMONIC("mov BH,Ib"); - return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI); + return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB); } @@ -10272,7 +11666,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg) } case IEMMODE_64BIT: { - uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); + uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! 
*/ IEMOP_HLP_NO_LOCK_PREFIX(); IEM_MC_BEGIN(0, 1); @@ -10292,7 +11686,7 @@ FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg) FNIEMOP_DEF(iemOp_eAX_Iv) { IEMOP_MNEMONIC("mov rAX,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB); } @@ -10300,7 +11694,7 @@ FNIEMOP_DEF(iemOp_eAX_Iv) FNIEMOP_DEF(iemOp_eCX_Iv) { IEMOP_MNEMONIC("mov rCX,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB); } @@ -10308,7 +11702,7 @@ FNIEMOP_DEF(iemOp_eCX_Iv) FNIEMOP_DEF(iemOp_eDX_Iv) { IEMOP_MNEMONIC("mov rDX,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB); } @@ -10316,7 +11710,7 @@ FNIEMOP_DEF(iemOp_eDX_Iv) FNIEMOP_DEF(iemOp_eBX_Iv) { IEMOP_MNEMONIC("mov rBX,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB); } @@ -10324,7 +11718,7 @@ FNIEMOP_DEF(iemOp_eBX_Iv) FNIEMOP_DEF(iemOp_eSP_Iv) { IEMOP_MNEMONIC("mov rSP,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB); } @@ -10332,7 +11726,7 @@ FNIEMOP_DEF(iemOp_eSP_Iv) FNIEMOP_DEF(iemOp_eBP_Iv) { IEMOP_MNEMONIC("mov rBP,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB); } @@ -10340,7 +11734,7 @@ FNIEMOP_DEF(iemOp_eBP_Iv) FNIEMOP_DEF(iemOp_eSI_Iv) { IEMOP_MNEMONIC("mov rSI,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB); } @@ -10348,7 +11742,7 @@ FNIEMOP_DEF(iemOp_eSI_Iv) FNIEMOP_DEF(iemOp_eDI_Iv) { IEMOP_MNEMONIC("mov rDI,IV"); - return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI); + return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB); } @@ -10366,7 +11760,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_Ib) case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break; case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break; case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break; - case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + case 6: return IEMOP_RAISE_INVALID_OPCODE(); IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */ } IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF); @@ -10396,7 +11790,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_Ib) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -10426,7 +11820,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib) case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break; case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break; case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break; - case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + case 6: return IEMOP_RAISE_INVALID_OPCODE(); IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */ } IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF); @@ -10458,6 +11852,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib) IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | 
pIemCpu->uRexB); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -10490,7 +11885,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -10510,7 +11905,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -10530,7 +11925,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_Ib) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); IEM_MC_ASSIGN(cShiftArg, cShift); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -10573,16 +11968,43 @@ FNIEMOP_DEF(iemOp_retn) /** Opcode 0xc4. */ FNIEMOP_DEF(iemOp_les_Gv_Mp) { + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ( pIemCpu->enmCpuMode == IEMMODE_64BIT + || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + IEMOP_MNEMONIC("2-byte-vex"); + /* The LES instruction is invalid 64-bit mode. In legacy and + compatability mode it is invalid with MOD=3. + The use as a VEX prefix is made possible by assigning the inverted + REX.R to the top MOD bit, and the top bit in the inverted register + specifier to the bottom MOD bit, thereby effectively limiting 32-bit + to accessing registers 0..7 in this VEX form. */ + /** @todo VEX: Just use new tables for it. */ + return IEMOP_RAISE_INVALID_OPCODE(); + } IEMOP_MNEMONIC("les Gv,Mp"); - return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES); + return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm); } /** Opcode 0xc5. */ FNIEMOP_DEF(iemOp_lds_Gv_Mp) { + uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); + if ( pIemCpu->enmCpuMode == IEMMODE_64BIT + || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) + { + IEMOP_MNEMONIC("3-byte-vex"); + /* The LDS instruction is invalid 64-bit mode. In legacy and + compatability mode it is invalid with MOD=3. + The use as a VEX prefix is made possible by assigning the inverted + REX.R and REX.X to the two MOD bits, since the REX bits are ignored + outside of 64-bit mode. */ + /** @todo VEX: Just use new tables for it. */ + return IEMOP_RAISE_INVALID_OPCODE(); + } IEMOP_MNEMONIC("lds Gv,Mp"); - return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS); + return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm); } @@ -10609,7 +12031,7 @@ FNIEMOP_DEF(iemOp_Grp11_Eb_Ib) /* memory access. 
*/ IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm); IEM_MC_ADVANCE_RIP(); @@ -10651,7 +12073,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz) case IEMMODE_64BIT: IEM_MC_BEGIN(0, 0); - uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); + uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -10668,7 +12090,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz) case IEMMODE_16BIT: IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm); IEM_MC_ADVANCE_RIP(); @@ -10678,7 +12100,7 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz) case IEMMODE_32BIT: IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm); IEM_MC_ADVANCE_RIP(); @@ -10688,8 +12110,8 @@ FNIEMOP_DEF(iemOp_Grp11_Ev_Iz) case IEMMODE_64BIT: IEM_MC_BEGIN(0, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); - uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); + uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); @@ -10796,7 +12218,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_1) case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break; case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break; case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break; - case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + case 6: return IEMOP_RAISE_INVALID_OPCODE(); IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */ } IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF); @@ -10825,7 +12247,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_1) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); @@ -10854,7 +12276,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1) case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break; case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break; case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break; - case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX(); + case 6: return IEMOP_RAISE_INVALID_OPCODE(); IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... 
*/ } IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF); @@ -10885,6 +12307,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1) IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -10917,7 +12340,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); @@ -10935,7 +12358,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); @@ -10953,7 +12376,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_1) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); @@ -11014,7 +12437,7 @@ FNIEMOP_DEF(iemOp_Grp2_Eb_CL) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); @@ -11076,6 +12499,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL) IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -11109,7 +12533,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); @@ -11128,7 +12552,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); @@ -11147,7 +12571,7 @@ FNIEMOP_DEF(iemOp_Grp2_Ev_CL) IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); @@ -11412,7 +12836,7 @@ 
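/*
 * Illustrative sketch, not part of the patch: what the IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF
 * lines added in the shift-group hunks are for.  In 64-bit mode a 32-bit register
 * result zero-extends into bits 63:32, but the shift helpers write through a
 * uint32_t pointer aliasing the low half of the 64-bit register, so the upper
 * half has to be cleared in a separate step.  Standalone C with hypothetical
 * demo* names; assumes a little-endian register file kept in a union so the
 * aliasing stays well defined.
 */
#include <stdint.h>

typedef union DEMOGREG { uint64_t u64; uint32_t u32; } DEMOGREG;

static void demoShlU32(uint32_t *pu32Dst, uint8_t cShift)
{
    *pu32Dst <<= (cShift & 31);            /* the helper only sees the low dword */
}

static void demoShlEd(DEMOGREG *pGReg, uint8_t cShift)
{
    demoShlU32(&pGReg->u32, cShift);
    pGReg->u64 &= UINT32_MAX;              /* the CLEAR_HIGH step: zero bits 63:32 */
}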
FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -11462,7 +12886,7 @@ FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -11496,7 +12920,7 @@ FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -11601,7 +13025,7 @@ FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm) IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val, r32Val, 1); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -11633,7 +13057,7 @@ FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm) IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -11669,7 +13093,7 @@ FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm) IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -11700,11 +13124,12 @@ FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm) IEMOP_MNEMONIC("fldenv m14/28byte"); IEM_MC_BEGIN(3, 0); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1); + IEM_MC_ARG(uint8_t, iEffSeg, 1); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc); IEM_MC_END(); return VINF_SUCCESS; @@ -11718,7 +13143,7 @@ FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); IEM_MC_ARG(uint16_t, u16Fsw, 0); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc); @@ -11734,11 +13159,12 @@ FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm) IEMOP_MNEMONIC("fstenv m14/m28byte"); IEM_MC_BEGIN(3, 0); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1); + IEM_MC_ARG(uint8_t, iEffSeg, 1); IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, 
bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst); IEM_MC_END(); return VINF_SUCCESS; @@ -11752,7 +13178,7 @@ FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm) IEM_MC_BEGIN(2, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(uint16_t, u16Fcw); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_FETCH_FCW(u16Fcw); @@ -12532,7 +13958,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -12582,7 +14008,7 @@ FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -12616,7 +14042,7 @@ FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -12722,7 +14148,7 @@ FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm) IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0); IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val, i32Val, 1); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -12754,7 +14180,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm) IEM_MC_ARG(int32_t *, pi32Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -12790,7 +14216,7 @@ FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm) IEM_MC_ARG(int32_t *, pi32Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -12826,7 +14252,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm) IEM_MC_ARG(int32_t *, pi32Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -12863,7 +14289,7 @@ FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm) IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U, pr80Val, r80Val, 1); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -12895,7 +14321,7 @@ 
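/*
 * Illustrative sketch, not part of the patch: why IEM_MC_CALC_RM_EFF_ADDR and
 * iemOpHlpCalcRmEffAddr grow a cbImm argument throughout this file.
 * RIP-relative addressing (mod=0, r/m=5 in 64-bit mode) is relative to the
 * *next* instruction, so when the effective address is computed before a
 * trailing immediate has been fetched, the decoder has to add the size of that
 * immediate by hand.  Standalone C with hypothetical demo* names.
 */
#include <stdint.h>

static uint64_t demoRipRelEffAddr(uint64_t uInstrRip,   /* RIP of the instruction       */
                                  uint8_t  cbDecoded,   /* opcode + ModR/M + disp bytes */
                                  uint8_t  cbImm,       /* immediate bytes still to come */
                                  int32_t  disp32)
{
    uint64_t const uNextRip = uInstrRip + cbDecoded + cbImm;
    return uNextRip + (int64_t)disp32;
}

/* E.g. "mov dword [rip+disp32], imm32" (C7 05 ...) passes cbImm=4,
   "shl byte [rip+disp32], imm8" (C0 /4 ib) passes cbImm=1, and plain
   "mov eax, [rip+disp32]" (8B 05 ...) passes cbImm=0. */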
FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm) IEM_MC_ARG(PRTFLOAT80U, pr80Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13276,7 +14702,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl) IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1); IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13325,7 +14751,7 @@ FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -13359,7 +14785,7 @@ FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -13463,7 +14889,7 @@ FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm) IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0); IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val, r64Val, 1); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13494,7 +14920,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm) IEM_MC_ARG(int64_t *, pi64Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13530,7 +14956,7 @@ FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm) IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13568,7 +14994,7 @@ FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm) IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -13596,14 +15022,15 @@ FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm) /** Opcode 0xdd !11/0. 
*/ FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm) { - IEMOP_MNEMONIC("fxrstor m94/108byte"); + IEMOP_MNEMONIC("frstor m94/108byte"); IEM_MC_BEGIN(3, 0); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1); + IEM_MC_ARG(uint8_t, iEffSeg, 1); IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc); IEM_MC_END(); return VINF_SUCCESS; @@ -13616,11 +15043,12 @@ FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm) IEMOP_MNEMONIC("fnsave m94/108byte"); IEM_MC_BEGIN(3, 0); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0); - IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 1); + IEM_MC_ARG(uint8_t, iEffSeg, 1); IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); + IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg); IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst); IEM_MC_END(); return VINF_SUCCESS; @@ -13636,10 +15064,10 @@ FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm) IEM_MC_LOCAL(uint16_t, u16Tmp); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); IEM_MC_FETCH_FSW(u16Tmp); IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp); IEM_MC_ADVANCE_RIP(); @@ -13825,7 +15253,7 @@ FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -13875,7 +15303,7 @@ FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -13909,7 +15337,7 @@ FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm) IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1); IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); @@ -14073,7 +15501,7 @@ FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm) IEM_MC_ARG(int16_t *, pi16Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -14109,7 +15537,7 @@ FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm) IEM_MC_ARG(int16_t *, pi16Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 
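/*
 * Illustrative sketch, not part of the patch: the fnstsw (and fldenv/frstor/
 * fnsave) reordering above.  The operand is now fully decoded - effective
 * address calculated and the lock-prefix check done - before the CR0.TS/EM
 * test, presumably so that a #UD from a stray LOCK prefix takes priority over
 * #NM and the instruction length is final before any exception is raised.
 * Standalone C with hypothetical names and return codes.
 */
#include <stdint.h>
#include <stdbool.h>

enum { DEMO_OK, DEMO_RAISE_UD, DEMO_RAISE_NM };

static int demoFnstswM16(bool fLockPrefix, bool fCr0TsOrEm,
                         uint64_t (*pfnCalcEffAddr)(void),
                         void (*pfnStoreU16)(uint64_t GCPtr, uint16_t uValue),
                         uint16_t u16Fsw)
{
    uint64_t const GCPtrEffDst = pfnCalcEffAddr();  /* 1. consume ModR/M + displacement */
    if (fLockPrefix)                                /* 2. decode exceptions (#UD) first  */
        return DEMO_RAISE_UD;
    if (fCr0TsOrEm)                                 /* 3. only now the #NM check         */
        return DEMO_RAISE_NM;
    pfnStoreU16(GCPtrEffDst, u16Fsw);               /* 4. do the actual store            */
    return DEMO_OK;
}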
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -14145,7 +15573,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm) IEM_MC_ARG(int16_t *, pi16Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -14191,7 +15619,7 @@ FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm) IEM_MC_ARG(int64_t *, pi64Dst, 1); IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); IEM_MC_MAYBE_RAISE_FPU_XCPT(); @@ -14366,34 +15794,58 @@ FNIEMOP_DEF(iemOp_loop_Jb) { case IEMMODE_16BIT: IEM_MC_BEGIN(0,0); - IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1); - IEM_MC_IF_CX_IS_NZ() { - IEM_MC_REL_JMP_S8(i8Imm); - } IEM_MC_ELSE() { + if (-(int8_t)pIemCpu->offOpcode != i8Imm) + { + IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1); + IEM_MC_IF_CX_IS_NZ() { + IEM_MC_REL_JMP_S8(i8Imm); + } IEM_MC_ELSE() { + IEM_MC_ADVANCE_RIP(); + } IEM_MC_ENDIF(); + } + else + { + IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xCX, 0); IEM_MC_ADVANCE_RIP(); - } IEM_MC_ENDIF(); + } IEM_MC_END(); return VINF_SUCCESS; case IEMMODE_32BIT: IEM_MC_BEGIN(0,0); - IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1); - IEM_MC_IF_ECX_IS_NZ() { - IEM_MC_REL_JMP_S8(i8Imm); - } IEM_MC_ELSE() { + if (-(int8_t)pIemCpu->offOpcode != i8Imm) + { + IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1); + IEM_MC_IF_ECX_IS_NZ() { + IEM_MC_REL_JMP_S8(i8Imm); + } IEM_MC_ELSE() { + IEM_MC_ADVANCE_RIP(); + } IEM_MC_ENDIF(); + } + else + { + IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xCX, 0); IEM_MC_ADVANCE_RIP(); - } IEM_MC_ENDIF(); + } IEM_MC_END(); return VINF_SUCCESS; case IEMMODE_64BIT: IEM_MC_BEGIN(0,0); - IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1); - IEM_MC_IF_RCX_IS_NZ() { - IEM_MC_REL_JMP_S8(i8Imm); - } IEM_MC_ELSE() { + if (-(int8_t)pIemCpu->offOpcode != i8Imm) + { + IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1); + IEM_MC_IF_RCX_IS_NZ() { + IEM_MC_REL_JMP_S8(i8Imm); + } IEM_MC_ELSE() { + IEM_MC_ADVANCE_RIP(); + } IEM_MC_ENDIF(); + } + else + { + IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xCX, 0); IEM_MC_ADVANCE_RIP(); - } IEM_MC_ENDIF(); + } IEM_MC_END(); return VINF_SUCCESS; @@ -14620,6 +16072,7 @@ FNIEMOP_DEF(iemOp_out_DX_eAX) /** Opcode 0xf0. */ FNIEMOP_DEF(iemOp_lock) { + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock"); pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK; uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); @@ -14632,6 +16085,7 @@ FNIEMOP_DEF(iemOp_repne) { /* This overrides any previous REPE prefix. */ pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ; + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne"); pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ; uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); @@ -14644,6 +16098,7 @@ FNIEMOP_DEF(iemOp_repe) { /* This overrides any previous REPNE prefix. 
*/ pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ; + IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe"); pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ; uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); @@ -14700,7 +16155,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) @@ -14738,7 +16193,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) @@ -14758,7 +16213,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) @@ -14778,7 +16233,7 @@ FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); IEM_MC_FETCH_EFLAGS(EFlags); if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)) @@ -14830,7 +16285,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); IEM_MC_ASSIGN(u8Src, u8Imm); IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -14883,6 +16338,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm) IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags); + /* No clearing the high dword here - test doesn't write back the result. 
*/ IEM_MC_ADVANCE_RIP(); IEM_MC_END(); return VINF_SUCCESS; @@ -14919,7 +16375,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); IEM_MC_ASSIGN(u16Src, u16Imm); IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -14941,7 +16397,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); IEM_MC_ASSIGN(u32Src, u32Imm); IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -14963,7 +16419,7 @@ FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm) IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); IEM_MC_ASSIGN(u64Src, u64Imm); IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/); @@ -14992,15 +16448,22 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8) { /* register access */ IEMOP_HLP_NO_LOCK_PREFIX(); - IEM_MC_BEGIN(3, 0); + IEM_MC_BEGIN(3, 1); IEM_MC_ARG(uint16_t *, pu16AX, 0); IEM_MC_ARG(uint8_t, u8Value, 1); IEM_MC_ARG(uint32_t *, pEFlags, 2); + IEM_MC_LOCAL(int32_t, rc); + IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); IEM_MC_REF_EFLAGS(pEFlags); - IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags); - IEM_MC_ADVANCE_RIP(); + IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags); + IEM_MC_IF_LOCAL_IS_Z(rc) { + IEM_MC_ADVANCE_RIP(); + } IEM_MC_ELSE() { + IEM_MC_RAISE_DIVIDE_ERROR(); + } IEM_MC_ENDIF(); + IEM_MC_END(); } else @@ -15008,19 +16471,24 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8) /* memory access. */ IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? 
*/ - IEM_MC_BEGIN(3, 1); + IEM_MC_BEGIN(3, 2); IEM_MC_ARG(uint16_t *, pu16AX, 0); IEM_MC_ARG(uint8_t, u8Value, 1); IEM_MC_ARG(uint32_t *, pEFlags, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); + IEM_MC_LOCAL(int32_t, rc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); IEM_MC_REF_EFLAGS(pEFlags); - IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags); + IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags); + IEM_MC_IF_LOCAL_IS_Z(rc) { + IEM_MC_ADVANCE_RIP(); + } IEM_MC_ELSE() { + IEM_MC_RAISE_DIVIDE_ERROR(); + } IEM_MC_ENDIF(); - IEM_MC_ADVANCE_RIP(); IEM_MC_END(); } return VINF_SUCCESS; @@ -15079,6 +16547,8 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl) IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags); IEM_MC_IF_LOCAL_IS_Z(rc) { + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX); IEM_MC_ADVANCE_RIP(); } IEM_MC_ELSE() { IEM_MC_RAISE_DIVIDE_ERROR(); @@ -15132,7 +16602,7 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int32_t, rc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); @@ -15159,13 +16629,15 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int32_t, rc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); IEM_MC_REF_EFLAGS(pEFlags); IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags); IEM_MC_IF_LOCAL_IS_Z(rc) { + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX); + IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX); IEM_MC_ADVANCE_RIP(); } IEM_MC_ELSE() { IEM_MC_RAISE_DIVIDE_ERROR(); @@ -15186,7 +16658,7 @@ FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl) IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); IEM_MC_LOCAL(int32_t, rc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst); IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); @@ -15420,7 +16892,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint16_t, u16Target, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target); IEM_MC_END() @@ -15430,7 +16902,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint32_t, u32Target, 0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target); IEM_MC_END() @@ -15440,7 +16912,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm) IEM_MC_BEGIN(1, 1); IEM_MC_ARG(uint64_t, u64Target, 
0); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target); IEM_MC_END() @@ -15468,7 +16940,7 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp IEM_MC_ARG(uint16_t, offSeg, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2); @@ -15476,13 +16948,34 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp IEM_MC_END(); return VINF_SUCCESS; + case IEMMODE_64BIT: + /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1) + * and will apparently ignore REX.W, at least for the jmp far qword [rsp] + * and call far qword [rsp] encodings. */ + if (!IEM_IS_GUEST_CPU_AMD(pIemCpu)) + { + IEM_MC_BEGIN(3, 1); + IEM_MC_ARG(uint16_t, u16Sel, 0); + IEM_MC_ARG(uint64_t, offSeg, 1); + IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2); + IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); + IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8); + IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize); + IEM_MC_END(); + return VINF_SUCCESS; + } + /* AMD falls thru. */ + case IEMMODE_32BIT: IEM_MC_BEGIN(3, 1); IEM_MC_ARG(uint16_t, u16Sel, 0); IEM_MC_ARG(uint32_t, offSeg, 1); IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4); @@ -15490,20 +16983,6 @@ FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImp IEM_MC_END(); return VINF_SUCCESS; - case IEMMODE_64BIT: - IEM_MC_BEGIN(3, 1); - IEM_MC_ARG(uint16_t, u16Sel, 0); - IEM_MC_ARG(uint64_t, offSeg, 1); - IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2); - IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); - IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); - IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc); - IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8); - IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize); - IEM_MC_END(); - return VINF_SUCCESS; - IEM_NOT_REACHED_DEFAULT_CASE_RET(); } } @@ -15564,14 +17043,14 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm) } else { - /* The new RIP is taken from a register. */ + /* The new RIP is taken from a memory location. 
*/ switch (pIemCpu->enmEffOpSize) { case IEMMODE_16BIT: IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Target); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_SET_RIP_U16(u16Target); IEM_MC_END() @@ -15581,7 +17060,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Target); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_SET_RIP_U32(u32Target); IEM_MC_END() @@ -15589,11 +17068,11 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm) case IEMMODE_64BIT: IEM_MC_BEGIN(0, 2); - IEM_MC_LOCAL(uint32_t, u32Target); + IEM_MC_LOCAL(uint64_t, u64Target); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); - IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc); - IEM_MC_SET_RIP_U32(u32Target); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); + IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc); + IEM_MC_SET_RIP_U64(u64Target); IEM_MC_END() return VINF_SUCCESS; @@ -15609,8 +17088,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm) */ FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm) { - IEMOP_MNEMONIC("jmp Ep"); - IEMOP_HLP_NO_64BIT(); + IEMOP_MNEMONIC("jmpf Ep"); return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp); } @@ -15636,7 +17114,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint16_t, u16Src); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_PUSH_U16(u16Src); IEM_MC_ADVANCE_RIP(); @@ -15647,7 +17125,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint32_t, u32Src); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_PUSH_U32(u32Src); IEM_MC_ADVANCE_RIP(); @@ -15658,7 +17136,7 @@ FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm) IEM_MC_BEGIN(0, 2); IEM_MC_LOCAL(uint64_t, u64Src); IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); - IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); + IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc); IEM_MC_PUSH_U64(u64Src); IEM_MC_ADVANCE_RIP(); @@ -15727,7 +17205,7 @@ const PFNIEMOP g_apfnOneByteMap[256] = /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI, /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX, /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI, - /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw, + /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw_movsx_Gv_Ev, /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size, /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib, /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX, diff --git a/src/VBox/VMM/VMMAll/IOMAll.cpp b/src/VBox/VMM/VMMAll/IOMAll.cpp index 7c6e5cc1..06f7b0c5 100644 --- a/src/VBox/VMM/VMMAll/IOMAll.cpp +++ b/src/VBox/VMM/VMMAll/IOMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2011 Oracle Corporation 
+ * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -40,14 +40,19 @@ /** - * Check if this VCPU currently owns the IOM lock. + * Check if this VCPU currently owns the IOM lock exclusively. * * @returns bool owner/not owner * @param pVM Pointer to the VM. */ -VMMDECL(bool) IOMIsLockOwner(PVM pVM) +VMMDECL(bool) IOMIsLockWriteOwner(PVM pVM) { +#ifdef IOM_WITH_CRIT_SECT_RW + return PDMCritSectRwIsInitialized(&pVM->iom.s.CritSect) + && PDMCritSectRwIsWriteOwner(&pVM->iom.s.CritSect); +#else return PDMCritSectIsOwner(&pVM->iom.s.CritSect); +#endif } @@ -215,16 +220,17 @@ bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegF * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only) * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param Port The port to read. * @param pu32Value Where to store the value read. * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes. */ -VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue) +VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue) { /** @todo should initialize *pu32Value here because it can happen that some * handle is buggy and doesn't handle all cases. */ /* Take the IOM lock before performing any device I/O. */ - int rc2 = IOM_LOCK(pVM); + int rc2 = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc2 == VERR_SEM_BUSY) return VINF_IOM_R3_IOPORT_READ; @@ -238,25 +244,25 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, /* * Get the statistics record. */ - PIOMIOPORTSTATS pStats = pVM->iom.s.CTX_SUFF(pStatsLastRead); + PIOMIOPORTSTATS pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastRead); if (!pStats || pStats->Core.Key != Port) { pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port); if (pStats) - pVM->iom.s.CTX_SUFF(pStatsLastRead) = pStats; + pVCpu->iom.s.CTX_SUFF(pStatsLastRead) = pStats; } #endif /* * Get handler for current context. */ - CTX_SUFF(PIOMIOPORTRANGE) pRange = pVM->iom.s.CTX_SUFF(pRangeLastRead); + CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastRead); if ( !pRange || (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts) { pRange = iomIOPortGetRange(pVM, Port); if (pRange) - pVM->iom.s.CTX_SUFF(pRangeLastRead) = pRange; + pVCpu->iom.s.CTX_SUFF(pRangeLastRead) = pRange; } MMHYPER_RC_ASSERT_RCPTR(pVM, pRange); if (pRange) @@ -269,13 +275,13 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, if (!pfnInCallback) { STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); }); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_READ; } #endif void *pvUser = pRange->pvUser; PPDMDEVINS pDevIns = pRange->pDevIns; - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); /* * Call the device. 
@@ -335,7 +341,7 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); # endif - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_READ; } #endif @@ -346,18 +352,6 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, #ifdef VBOX_WITH_STATISTICS if (pStats) STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(In)); - else - { -# ifndef IN_RING3 - /* Ring-3 will have to create the statistics record. */ - IOM_UNLOCK(pVM); - return VINF_IOM_R3_IOPORT_READ; -# else - pStats = iomR3IOPortStatsCreate(pVM, Port, NULL); - if (pStats) - STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(In)); -# endif - } #endif /* make return value */ @@ -368,11 +362,11 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break; default: AssertMsgFailed(("Invalid I/O port size %d. Port=%d\n", cbValue, Port)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VERR_IOM_INVALID_IOPORT_SIZE; } Log3(("IOMIOPortRead: Port=%RTiop *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", Port, *pu32Value, cbValue)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_SUCCESS; } @@ -388,15 +382,17 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, RTIOPORT Port, uint32_t *pu32Value, * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only) * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param Port The port to read. * @param pGCPtrDst Pointer to the destination buffer (GC, incremented appropriately). * @param pcTransfers Pointer to the number of transfer units to read, on return remaining transfer units. * @param cb Size of the transfer unit (1, 2 or 4 bytes). */ -VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPtrDst, PRTGCUINTREG pcTransfers, unsigned cb) +VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, + PRTGCPTR pGCPtrDst, PRTGCUINTREG pcTransfers, unsigned cb) { /* Take the IOM lock before performing any device I/O. */ - int rc2 = IOM_LOCK(pVM); + int rc2 = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc2 == VERR_SEM_BUSY) return VINF_IOM_R3_IOPORT_READ; @@ -413,25 +409,25 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPt /* * Get the statistics record. */ - PIOMIOPORTSTATS pStats = pVM->iom.s.CTX_SUFF(pStatsLastRead); + PIOMIOPORTSTATS pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastRead); if (!pStats || pStats->Core.Key != Port) { pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port); if (pStats) - pVM->iom.s.CTX_SUFF(pStatsLastRead) = pStats; + pVCpu->iom.s.CTX_SUFF(pStatsLastRead) = pStats; } #endif /* * Get handler for current context. 
*/ - CTX_SUFF(PIOMIOPORTRANGE) pRange = pVM->iom.s.CTX_SUFF(pRangeLastRead); + CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastRead); if ( !pRange || (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts) { pRange = iomIOPortGetRange(pVM, Port); if (pRange) - pVM->iom.s.CTX_SUFF(pRangeLastRead) = pRange; + pVCpu->iom.s.CTX_SUFF(pRangeLastRead) = pRange; } MMHYPER_RC_ASSERT_RCPTR(pVM, pRange); if (pRange) @@ -444,13 +440,13 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPt if (!pfnInStrCallback) { STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); }); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_READ; } #endif void *pvUser = pRange->pvUser; PPDMDEVINS pDevIns = pRange->pDevIns; - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); /* * Call the device. @@ -497,7 +493,7 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPt if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); # endif - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_READ; } #endif @@ -508,23 +504,11 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPt #ifdef VBOX_WITH_STATISTICS if (pStats) STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(In)); - else - { -# ifndef IN_RING3 - /* Ring-3 will have to create the statistics record. */ - IOM_UNLOCK(pVM); - return VINF_IOM_R3_IOPORT_READ; -# else - pStats = iomR3IOPortStatsCreate(pVM, Port, NULL); - if (pStats) - STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(In)); -# endif - } #endif Log3(("IOMIOPortReadStr: Port=%RTiop pGCPtrDst=%p pcTransfer=%p:{%#x->%#x} cb=%d rc=VINF_SUCCESS\n", Port, pGCPtrDst, pcTransfers, cTransfers, *pcTransfers, cb)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_SUCCESS; } @@ -540,14 +524,15 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortReadString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPt * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only) * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param Port The port to write to. * @param u32Value The value to write. * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes. */ -VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue) +VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue) { /* Take the IOM lock before performing any device I/O. */ - int rc2 = IOM_LOCK(pVM); + int rc2 = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc2 == VERR_SEM_BUSY) return VINF_IOM_R3_IOPORT_WRITE; @@ -563,25 +548,25 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, /* * Find the statistics record. */ - PIOMIOPORTSTATS pStats = pVM->iom.s.CTX_SUFF(pStatsLastWrite); + PIOMIOPORTSTATS pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastWrite); if (!pStats || pStats->Core.Key != Port) { pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port); if (pStats) - pVM->iom.s.CTX_SUFF(pStatsLastWrite) = pStats; + pVCpu->iom.s.CTX_SUFF(pStatsLastWrite) = pStats; } #endif /* * Get handler for current context. 
*/ - CTX_SUFF(PIOMIOPORTRANGE) pRange = pVM->iom.s.CTX_SUFF(pRangeLastWrite); + CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastWrite); if ( !pRange || (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts) { pRange = iomIOPortGetRange(pVM, Port); if (pRange) - pVM->iom.s.CTX_SUFF(pRangeLastWrite) = pRange; + pVCpu->iom.s.CTX_SUFF(pRangeLastWrite) = pRange; } MMHYPER_RC_ASSERT_RCPTR(pVM, pRange); if (pRange) @@ -594,13 +579,13 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, if (!pfnOutCallback) { STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->OutRZToR3); }); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_WRITE; } #endif void *pvUser = pRange->pvUser; PPDMDEVINS pDevIns = pRange->pDevIns; - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); /* * Call the device. @@ -646,7 +631,7 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, if (pStats) STAM_COUNTER_INC(&pStats->OutRZToR3); # endif - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_WRITE; } #endif @@ -658,21 +643,9 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, /* statistics. */ if (pStats) STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Out)); - else - { -# ifndef IN_RING3 - /* R3 will have to create the statistics record. */ - IOM_UNLOCK(pVM); - return VINF_IOM_R3_IOPORT_WRITE; -# else - pStats = iomR3IOPortStatsCreate(pVM, Port, NULL); - if (pStats) - STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Out)); -# endif - } #endif Log3(("IOMIOPortWrite: Port=%RTiop u32=%08RX32 cb=%d nop\n", Port, u32Value, cbValue)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_SUCCESS; } @@ -688,15 +661,17 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only) * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param Port The port to write. * @param pGCPtrSrc Pointer to the source buffer (GC, incremented appropriately). * @param pcTransfers Pointer to the number of transfer units to write, on return remaining transfer units. * @param cb Size of the transfer unit (1, 2 or 4 bytes). - * */ -VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCPtrSrc, PRTGCUINTREG pcTransfers, unsigned cb) + */ +VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, + PRTGCPTR pGCPtrSrc, PRTGCUINTREG pcTransfers, unsigned cb) { /* Take the IOM lock before performing any device I/O. */ - int rc2 = IOM_LOCK(pVM); + int rc2 = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc2 == VERR_SEM_BUSY) return VINF_IOM_R3_IOPORT_WRITE; @@ -713,25 +688,25 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCP /* * Get the statistics record. */ - PIOMIOPORTSTATS pStats = pVM->iom.s.CTX_SUFF(pStatsLastWrite); + PIOMIOPORTSTATS pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastWrite); if (!pStats || pStats->Core.Key != Port) { pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port); if (pStats) - pVM->iom.s.CTX_SUFF(pStatsLastWrite) = pStats; + pVCpu->iom.s.CTX_SUFF(pStatsLastWrite) = pStats; } #endif /* * Get handler for current context. 
*/ - CTX_SUFF(PIOMIOPORTRANGE) pRange = pVM->iom.s.CTX_SUFF(pRangeLastWrite); + CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastWrite); if ( !pRange || (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts) { pRange = iomIOPortGetRange(pVM, Port); if (pRange) - pVM->iom.s.CTX_SUFF(pRangeLastWrite) = pRange; + pVCpu->iom.s.CTX_SUFF(pRangeLastWrite) = pRange; } MMHYPER_RC_ASSERT_RCPTR(pVM, pRange); if (pRange) @@ -744,13 +719,13 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCP if (!pfnOutStrCallback) { STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->OutRZToR3); }); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_WRITE; } #endif void *pvUser = pRange->pvUser; PPDMDEVINS pDevIns = pRange->pDevIns; - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); /* * Call the device. @@ -797,7 +772,7 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCP if (pStats) STAM_COUNTER_INC(&pStats->OutRZToR3); # endif - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_IOM_R3_IOPORT_WRITE; } #endif @@ -808,23 +783,11 @@ VMMDECL(VBOXSTRICTRC) IOMIOPortWriteString(PVM pVM, RTIOPORT Port, PRTGCPTR pGCP #ifdef VBOX_WITH_STATISTICS if (pStats) STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Out)); - else - { -# ifndef IN_RING3 - /* Ring-3 will have to create the statistics record. */ - IOM_UNLOCK(pVM); - return VINF_IOM_R3_IOPORT_WRITE; -# else - pStats = iomR3IOPortStatsCreate(pVM, Port, NULL); - if (pStats) - STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Out)); -# endif - } #endif Log3(("IOMIOPortWriteStr: Port=%RTiop pGCPtrSrc=%p pcTransfer=%p:{%#x->%#x} cb=%d rc=VINF_SUCCESS\n", Port, pGCPtrSrc, pcTransfers, cTransfers, *pcTransfers, cb)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VINF_SUCCESS; } @@ -942,10 +905,11 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretCheckPortIOAccess(PVM pVM, PCPUMCTXCORE pCtxCo * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine (GC pointer of course). + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. */ -VMMDECL(VBOXSTRICTRC) IOMInterpretIN(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) +VMMDECL(VBOXSTRICTRC) IOMInterpretIN(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { #ifdef IN_RC STAM_COUNTER_INC(&pVM->iom.s.StatInstIn); @@ -969,7 +933,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretIN(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTA * Attempt to read the port. */ uint32_t u32Data = UINT32_C(0xffffffff); - rcStrict = IOMIOPortRead(pVM, uPort, &u32Data, cbSize); + rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Data, cbSize); if (IOM_SUCCESS(rcStrict)) { /* @@ -1002,10 +966,11 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretIN(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTA * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine (GC pointer of course). + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. 
*/ -VMMDECL(VBOXSTRICTRC) IOMInterpretOUT(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) +VMMDECL(VBOXSTRICTRC) IOMInterpretOUT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { #ifdef IN_RC STAM_COUNTER_INC(&pVM->iom.s.StatInstOut); @@ -1030,7 +995,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUT(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUST /* * Attempt to write to the port. */ - rcStrict = IOMIOPortWrite(pVM, uPort, u64Data, cbSize); + rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u64Data, cbSize); AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); } else diff --git a/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp b/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp index ba4133e6..32cc2360 100644 --- a/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp +++ b/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -34,7 +34,7 @@ #include "IOMInternal.h" #include <VBox/vmm/vm.h> #include <VBox/vmm/vmm.h> -#include <VBox/vmm/hwaccm.h> +#include <VBox/vmm/hm.h> #include "IOMInline.h" #include <VBox/dis.h> @@ -77,7 +77,7 @@ static const unsigned g_aSize2Shift[] = /** * Deals with complicated MMIO writes. * - * Complicatd means unaligned or non-dword/qword align accesses depending on + * Complicated means unaligned or non-dword/qword sized accesses depending on * the MMIO region's access mode flags. * * @returns Strict VBox status code. Any EM scheduling status code, @@ -97,7 +97,8 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG VERR_IOM_MMIO_IPE_1); AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2); RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart); - bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) >= IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING; + bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING + || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING; /* * Do debug stop if requested. @@ -117,6 +118,19 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG } #endif + /* + * Check if we should ignore the write. + */ + if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD) + { + Assert(cbValue != 4 || (GCPhys & 3)); + return VINF_SUCCESS; + } + if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD) + { + Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1))); + return VINF_SUCCESS; + } /* * Split and conquer. @@ -150,8 +164,12 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG case VINF_IOM_R3_MMIO_READ_WRITE: case VINF_IOM_R3_MMIO_WRITE: /** @todo What if we've split a transfer and already read - * something? Since reads can have sideeffects we could be - * kind of screwed here... */ + * something? Since writes generally have sideeffects we + * could be kind of screwed here... + * + * Fix: Save the current state and resume it in ring-3. Requires EM to not go + * to REM for MMIO accesses (like may currently do). 
*/ + LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2)); return rc2; default: @@ -197,7 +215,7 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG if (offAccess) { u32GivenValue <<= offAccess * 8; - u32GivenMask <<= offAccess * 8; + u32GivenMask <<= offAccess * 8; } uint32_t u32Value = (u32MissingValue & ~u32GivenMask) @@ -217,7 +235,10 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG case VINF_IOM_R3_MMIO_WRITE: /** @todo What if we've split a transfer and already read * something? Since reads can have sideeffects we could be - * kind of screwed here... */ + * kind of screwed here... + * + * Fix: Save the current state and resume it in ring-3. Requires EM to not go + * to REM for MMIO accesses (like may currently do). */ LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2)); return rc2; default: @@ -252,20 +273,28 @@ static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTG * Wrapper which does the write and updates range statistics when such are enabled. * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE! */ -static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb) +static int iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb) { #ifdef VBOX_WITH_STATISTICS - PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange); - Assert(pStats); + int rcSem = IOM_LOCK_SHARED(pVM); + if (rcSem == VERR_SEM_BUSY) + return VINF_IOM_R3_MMIO_WRITE; + PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange); + if (!pStats) +# ifdef IN_RING3 + return VERR_NO_MEMORY; +# else + return VINF_IOM_R3_MMIO_WRITE; +# endif + STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a); #endif - STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a); VBOXSTRICTRC rc; if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback))) { if ( (cb == 4 && !(GCPhysFault & 3)) || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU - || (cb == 8 && !(GCPhysFault & 7)) ) + || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) ) rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */ else @@ -273,6 +302,7 @@ static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, c } else rc = VINF_SUCCESS; + STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a); STAM_COUNTER_INC(&pStats->Accesses); return VBOXSTRICTRC_TODO(rc); @@ -459,20 +489,31 @@ static int iomMMIODoRead00s(void *pvValue, size_t cbValue) /** * Wrapper which does the read and updates range statistics when such are enabled. 
*/ -DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue) +DECLINLINE(int) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue) { #ifdef VBOX_WITH_STATISTICS - PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange); - Assert(pStats); + int rcSem = IOM_LOCK_SHARED(pVM); + if (rcSem == VERR_SEM_BUSY) + return VINF_IOM_R3_MMIO_READ; + PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange); + if (!pStats) +# ifdef IN_RING3 + return VERR_NO_MEMORY; +# else + return VINF_IOM_R3_MMIO_READ; +# endif STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); #endif VBOXSTRICTRC rc; if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback))) { - if ( (cbValue == 4 && !(GCPhys & 3)) + if ( ( cbValue == 4 + && !(GCPhys & 3)) || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU - || (cbValue == 8 && !(GCPhys & 7)) ) + || ( cbValue == 8 + && !(GCPhys & 7) + && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) ) rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue); else rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue); @@ -487,6 +528,7 @@ DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, vo case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break; } } + STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a); STAM_COUNTER_INC(&pStats->Accesses); return VBOXSTRICTRC_VAL(rc); @@ -532,12 +574,14 @@ DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb) * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. * @param GCPhysFault The GC physical address corresponding to pvFault. */ -static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault) +static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault) { Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); @@ -549,7 +593,7 @@ static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE p AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb)); uint64_t u64Data = 0; - int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb); + int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb); if (rc == VINF_SUCCESS) { /* @@ -591,12 +635,14 @@ static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE p * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. * @param GCPhysFault The GC physical address corresponding to pvFault. 
*/ -static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault) +static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault) { Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3); @@ -609,7 +655,7 @@ static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb); AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc); - int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb); + int rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb); if (rc == VINF_SUCCESS) iomMMIOStatLength(pVM, cb); return rc; @@ -925,12 +971,14 @@ static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode) * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. */ -static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, + PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) { /* * We do not support segment prefixes or REPNE.. @@ -946,7 +994,7 @@ static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul if (pCpu->fPrefix & DISPREFIX_REP) { #ifndef IN_RC - if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM)) + if ( CPUMIsGuestIn64BitCode(pVCpu) && pRegFrame->rcx >= _4G) return VINF_EM_RAW_EMULATE_INSTR; #endif @@ -1021,7 +1069,7 @@ static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul /* fill loop. */ do { - rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb); + rc = iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb); if (rc != VINF_SUCCESS) break; @@ -1057,12 +1105,14 @@ static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. */ -static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange) { Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); @@ -1082,7 +1132,7 @@ static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul /* * Perform read. */ - int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb); + int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb); if (rc == VINF_SUCCESS) { uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode); @@ -1114,7 +1164,8 @@ static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. 
*/ -static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange) { Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); @@ -1127,10 +1178,10 @@ static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault int rc; if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb)) /* cmp reg, [MMIO]. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb); else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb)) /* cmp [MMIO], reg|imm. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); else { AssertMsgFailed(("Disassember CMP problem..\n")); @@ -1167,13 +1218,15 @@ static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. * @param pfnEmulate Instruction emulation function. */ -static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate) +static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate) { unsigned cb = 0; uint64_t uData1 = 0; @@ -1204,7 +1257,7 @@ static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhys /* and reg, [MMIO]. */ Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); fAndWrite = false; - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb); } else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb)) { @@ -1217,7 +1270,7 @@ static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhys fAndWrite = true; if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3) && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3)) - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); else rc = VINF_IOM_R3_MMIO_READ_WRITE; } @@ -1236,7 +1289,7 @@ static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhys if (fAndWrite) /* Store result to MMIO. */ - rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); else { /* Store result to register. */ @@ -1266,12 +1319,14 @@ static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhys * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. 
*/ -static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange) { Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); @@ -1283,12 +1338,12 @@ static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb)) { /* and test, [MMIO]. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb); } else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb)) { /* test [MMIO], reg|imm. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); } else { @@ -1324,12 +1379,14 @@ static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. */ -static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange) { Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3); @@ -1346,7 +1403,7 @@ static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1); /* bt [MMIO], reg|imm. */ - int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData); + int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData); if (rc == VINF_SUCCESS) { /* Find the bit inside the faulting address */ @@ -1367,12 +1424,14 @@ static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, * @returns VBox status code. * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pCpu Disassembler CPU state. * @param pRange Pointer MMIO range. */ -static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange) +static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, + PIOMMMIORANGE pRange) { /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */ if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3) @@ -1386,11 +1445,11 @@ static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb)) { /* xchg reg, [MMIO]. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb); if (rc == VINF_SUCCESS) { /* Store result to MMIO. 
*/ - rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); if (rc == VINF_SUCCESS) { @@ -1407,11 +1466,11 @@ static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb)) { /* xchg [MMIO], reg. */ - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb); if (rc == VINF_SUCCESS) { /* Store result to MMIO. */ - rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb); + rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb); if (rc == VINF_SUCCESS) { /* Store result to register. */ @@ -1419,10 +1478,10 @@ static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc); } else - AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc)); + AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc)); } else - AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc)); + AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc)); } else { @@ -1438,16 +1497,16 @@ static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFaul * * @returns VBox status code (appropriate for GC return). * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have * any error code (the EPT misconfig hack). * @param pCtxCore Trap register frame. * @param GCPhysFault The GC physical address corresponding to pvFault. * @param pvUser Pointer to the MMIO ring-3 range entry. */ -static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser) +static int iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser) { - /* Take the IOM lock before performing any MMIO. */ - int rc = IOM_LOCK(pVM); + int rc = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc == VERR_SEM_BUSY) return VINF_IOM_R3_MMIO_READ_WRITE; @@ -1455,27 +1514,28 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R AssertRC(rc); STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a); - Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", - GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip)); + Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip)); PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser; Assert(pRange); - Assert(pRange == iomMmioGetRange(pVM, GCPhysFault)); + Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault)); + iomMmioRetainRange(pRange); +#ifndef VBOX_WITH_STATISTICS + IOM_UNLOCK_SHARED(pVM); -#ifdef VBOX_WITH_STATISTICS +#else /* - * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything. + * Locate the statistics. 
*/ - PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange); + PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange); if (!pStats) { + iomMmioReleaseRange(pVM, pRange); # ifdef IN_RING3 - IOM_UNLOCK(pVM); return VERR_NO_MEMORY; # else STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a); STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures); - IOM_UNLOCK(pVM); return VINF_IOM_R3_MMIO_READ_WRITE; # endif } @@ -1504,7 +1564,7 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a); STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures); - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); return VINF_IOM_R3_MMIO_READ_WRITE; } #endif /* !IN_RING3 */ @@ -1512,9 +1572,7 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R /* * Retain the range and do locking. */ - iomMmioRetainRange(pRange); PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns); - IOM_UNLOCK(pVM); rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE); if (rc != VINF_SUCCESS) { @@ -1525,14 +1583,13 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R /* * Disassemble the instruction and interpret it. */ - PVMCPU pVCpu = VMMGetCpu(pVM); PDISCPUSTATE pDis = &pVCpu->iom.s.DisState; unsigned cbOp; rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp); if (RT_FAILURE(rc)) { - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return rc; } switch (pDis->pCurInstr->uOpcode) @@ -1546,9 +1603,9 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */ ? uErrorCode & X86_TRAP_PF_RW : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse)) - rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault); + rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault); else - rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault); + rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b); break; } @@ -1575,7 +1632,7 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R case OP_STOSWD: Assert(uErrorCode & X86_TRAP_PF_RW); STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d); - rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d); break; @@ -1583,52 +1640,52 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R case OP_LODSWD: Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX); STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e); - rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e); break; case OP_CMP: Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX); STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f); - rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f); break; case OP_AND: STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g); - rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd); + rc = iomInterpretOrXorAnd(pVM, 
pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g); break; case OP_OR: STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k); - rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr); + rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k); break; case OP_XOR: STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m); - rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor); + rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m); break; case OP_TEST: Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX); STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h); - rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h); break; case OP_BT: Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX); STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l); - rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l); break; case OP_XCHG: STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i); - rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange); + rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange); STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i); break; @@ -1665,8 +1722,8 @@ static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, R } STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a); - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return rc; } @@ -1685,7 +1742,7 @@ VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, { LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n", GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); - VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser); + VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, VMMGetCpu(pVM), (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser); return VBOXSTRICTRC_VAL(rcStrict); } @@ -1694,19 +1751,33 @@ VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, * * @returns VBox status code (appropriate for GC return). * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param uErrorCode CPU Error code. * @param pCtxCore Trap register frame. * @param GCPhysFault The GC physical address. */ -VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault) +VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault) { - int rc2 = IOM_LOCK(pVM); NOREF(rc2); + /* + * We don't have a range here, so look it up before calling the common function. 
+ */ + int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2); #ifndef IN_RING3 if (rc2 == VERR_SEM_BUSY) return VINF_IOM_R3_MMIO_READ_WRITE; #endif - VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault)); - IOM_UNLOCK(pVM); + PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault); + if (RT_UNLIKELY(!pRange)) + { + IOM_UNLOCK_SHARED(pVM); + return VERR_IOM_MMIO_RANGE_NOT_FOUND; + } + iomMmioRetainRange(pRange); + IOM_UNLOCK_SHARED(pVM); + + VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange); + + iomMmioReleaseRange(pVM, pRange); return VBOXSTRICTRC_VAL(rcStrict); } @@ -1728,26 +1799,27 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXC DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser) { + PVMCPU pVCpu = VMMGetCpu(pVM); PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser; STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler); - AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf)); + AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf)); AssertPtr(pRange); NOREF(pvPhys); /* * Validate the range. */ - int rc = IOM_LOCK(pVM); + int rc = IOM_LOCK_SHARED(pVM); AssertRC(rc); - Assert(pRange == iomMmioGetRange(pVM, GCPhysFault)); + Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault)); /* * Perform locking. */ iomMmioRetainRange(pRange); PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE); if (rc != VINF_SUCCESS) { @@ -1759,9 +1831,9 @@ DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, * Perform the access. */ if (enmAccessType == PGMACCESSTYPE_READ) - rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf); + rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf); else - rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf); + rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf); AssertRC(rc); iomMmioReleaseRange(pVM, pRange); @@ -1777,14 +1849,15 @@ DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, * @returns VBox status code. * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param GCPhys The physical address to read. * @param pu32Value Where to store the value read. * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes. */ -VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue) +VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue) { /* Take the IOM lock before performing any MMIO. */ - VBOXSTRICTRC rc = IOM_LOCK(pVM); + VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc == VERR_SEM_BUSY) return VINF_IOM_R3_MMIO_WRITE; @@ -1797,18 +1870,22 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, /* * Lookup the current context range node and statistics. */ - PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys); + PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys); if (!pRange) { AssertMsgFailed(("Handlers and page tables are out of sync or something! 
GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VERR_IOM_MMIO_RANGE_NOT_FOUND; } -#ifdef VBOX_WITH_STATISTICS - PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange); + iomMmioRetainRange(pRange); +#ifndef VBOX_WITH_STATISTICS + IOM_UNLOCK_SHARED(pVM); + +#else /* VBOX_WITH_STATISTICS */ + PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange); if (!pStats) { - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); # ifdef IN_RING3 return VERR_NO_MEMORY; # else @@ -1823,9 +1900,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, /* * Perform locking. */ - iomMmioRetainRange(pRange); PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns); - IOM_UNLOCK(pVM); rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE); if (rc != VINF_SUCCESS) { @@ -1849,8 +1924,8 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, { case VINF_SUCCESS: Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue)); - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return rc; #ifndef IN_RING3 case VINF_IOM_R3_MMIO_READ: @@ -1859,22 +1934,22 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, #endif default: Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc))); - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return rc; case VINF_IOM_MMIO_UNUSED_00: iomMMIODoRead00s(pu32Value, cbValue); Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc))); - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return VINF_SUCCESS; case VINF_IOM_MMIO_UNUSED_FF: iomMMIODoReadFFs(pu32Value, cbValue); Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc))); - iomMmioReleaseRange(pVM, pRange); PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo)); + iomMmioReleaseRange(pVM, pRange); return VINF_SUCCESS; } /* not reached */ @@ -1883,7 +1958,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, if (pRange->pfnReadCallbackR3) { STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3)); - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); return VINF_IOM_R3_MMIO_READ; } #endif @@ -1895,7 +1970,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a); iomMMIODoReadFFs(pu32Value, cbValue); Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue)); - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); return VINF_SUCCESS; } @@ -1906,14 +1981,15 @@ VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, * @returns VBox status code. * * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param GCPhys The physical address to write to. * @param u32Value The value to write. * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes. 
*/ -VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue) +VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue) { /* Take the IOM lock before performing any MMIO. */ - VBOXSTRICTRC rc = IOM_LOCK(pVM); + VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM); #ifndef IN_RING3 if (rc == VERR_SEM_BUSY) return VINF_IOM_R3_MMIO_WRITE; @@ -1926,18 +2002,22 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, /* * Lookup the current context range node. */ - PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys); + PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys); if (!pRange) { AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue)); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); return VERR_IOM_MMIO_RANGE_NOT_FOUND; } -#ifdef VBOX_WITH_STATISTICS - PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange); + iomMmioRetainRange(pRange); +#ifndef VBOX_WITH_STATISTICS + IOM_UNLOCK_SHARED(pVM); + +#else /* VBOX_WITH_STATISTICS */ + PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange); if (!pStats) { - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); # ifdef IN_RING3 return VERR_NO_MEMORY; # else @@ -1952,9 +2032,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, /* * Perform locking. */ - iomMmioRetainRange(pRange); PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns); - IOM_UNLOCK(pVM); rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ); if (rc != VINF_SUCCESS) { @@ -1988,7 +2066,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, if (pRange->pfnWriteCallbackR3) { STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3)); - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); return VINF_IOM_R3_MMIO_WRITE; } #endif @@ -1999,7 +2077,7 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a); STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a); Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS)); - IOM_UNLOCK(pVM); + iomMmioReleaseRange(pVM, pRange); return VINF_SUCCESS; } @@ -2022,13 +2100,14 @@ VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param uPort IO Port * @param uPrefix IO instruction prefix * @param enmAddrMode The address mode. * @param cbTransfer Size of transfer unit */ -VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, +VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, DISCPUMODE enmAddrMode, uint32_t cbTransfer) { STAM_COUNTER_INC(&pVM->iom.s.StatInstIns); @@ -2041,8 +2120,6 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_ || pRegFrame->eflags.Bits.u1DF) return VINF_EM_RAW_EMULATE_INSTR; - PVMCPU pVCpu = VMMGetCpu(pVM); - /* * Get bytes/words/dwords count to transfer. 
*/ @@ -2088,7 +2165,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_ /* If the device supports string transfers, ask it to do as * much as it wants. The rest is done with single-word transfers. */ const RTGCUINTREG cTransfersOrg = cTransfers; - rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer); + rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, &GCPtrDst, &cTransfers, cbTransfer); AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg); pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask) | (pRegFrame->rdi & ~fAddrMask); @@ -2100,7 +2177,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_ while (cTransfers && rcStrict == VINF_SUCCESS) { uint32_t u32Value; - rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer); + rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer); if (!IOM_SUCCESS(rcStrict)) break; rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer); @@ -2124,6 +2201,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_ } +#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */ /** * [REP*] INSB/INSW/INSD * ES:EDI,DX[,ECX] @@ -2140,10 +2218,11 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_ * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. */ -VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) +VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { /* * Get port number directly from the register (no need to bother the @@ -2163,8 +2242,9 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUST return rcStrict; } - return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb); + return IOMInterpretINSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb); } +#endif /* !IEM || RC */ /** @@ -2184,13 +2264,14 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUST * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param uPort IO Port * @param uPrefix IO instruction prefix * @param enmAddrMode The address mode. * @param cbTransfer Size of transfer unit */ -VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, +VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, DISCPUMODE enmAddrMode, uint32_t cbTransfer) { STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts); @@ -2203,8 +2284,6 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32 || pRegFrame->eflags.Bits.u1DF) return VINF_EM_RAW_EMULATE_INSTR; - PVMCPU pVCpu = VMMGetCpu(pVM); - /* * Get bytes/words/dwords count to transfer. 
*/ @@ -2252,7 +2331,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32 * much as it wants. The rest is done with single-word transfers. */ const RTGCUINTREG cTransfersOrg = cTransfers; - rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer); + rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, &GCPtrSrc, &cTransfers, cbTransfer); AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg); pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask) | (pRegFrame->rsi & ~fAddrMask); @@ -2268,7 +2347,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer); if (rcStrict != VINF_SUCCESS) break; - rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer); + rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer); if (!IOM_SUCCESS(rcStrict)) break; GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer); @@ -2291,6 +2370,7 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32 } +#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */ /** * [REP*] OUTSB/OUTSW/OUTSD * DS:ESI,DX[,ECX] @@ -2307,10 +2387,11 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr) * * @param pVM The virtual machine. + * @param pVCpu Pointer to the virtual CPU structure of the caller. * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure. * @param pCpu Disassembler CPU state. */ -VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) +VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { /* * Get port number from the first parameter. @@ -2332,8 +2413,9 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUS return rcStrict; } - return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb); + return IOMInterpretOUTSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb); } +#endif /* !IEM || RC */ #ifndef IN_RC @@ -2353,25 +2435,26 @@ VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUS */ VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags) { +# ifndef IEM_VERIFICATION_MODE_FULL /* Currently only called from the VGA device during MMIO. */ Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags)); AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER); PVMCPU pVCpu = VMMGetCpu(pVM); /* This currently only works in real mode, protected mode without paging or with nested paging. */ - if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */ + if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */ || ( CPUMIsGuestInPagedProtectedMode(pVCpu) - && !HWACCMIsNestedPagingActive(pVM))) + && !HMIsNestedPagingActive(pVM))) return VINF_SUCCESS; /* ignore */ - int rc = IOM_LOCK(pVM); + int rc = IOM_LOCK_SHARED(pVM); if (RT_FAILURE(rc)) return VINF_SUCCESS; /* better luck the next time around */ /* * Lookup the context range node the page belongs to. 
*/ - PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys); + PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys); AssertMsgReturn(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND); @@ -2386,7 +2469,7 @@ VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapp rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped); - IOM_UNLOCK(pVM); + IOM_UNLOCK_SHARED(pVM); AssertRCReturn(rc, rc); /* @@ -2395,20 +2478,22 @@ VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapp * * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page. */ -#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */ -# ifdef VBOX_STRICT +# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */ +# ifdef VBOX_STRICT uint64_t fFlags; RTHCPHYS HCPhys; rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys); Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); -# endif -#endif +# endif +# endif rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys); Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); +# endif /* !IEM_VERIFICATION_MODE_FULL */ return VINF_SUCCESS; } +# ifndef IEM_VERIFICATION_MODE_FULL /** * Mapping a HC page in place of an MMIO page for direct access. * @@ -2416,28 +2501,27 @@ VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapp * * @returns VBox status code. * - * @param pVM The virtual machine. + * @param pVM Pointer to the VM. + * @param pVCpu Pointer to the VMCPU. * @param GCPhys The address of the MMIO page to be changed. * @param HCPhys The address of the host physical page. * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P) * for the time being. */ -VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags) +VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags) { /* Currently only called from VT-x code during a page fault. */ Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags)); AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER); - Assert(HWACCMIsEnabled(pVM)); - - PVMCPU pVCpu = VMMGetCpu(pVM); + Assert(HMIsEnabled(pVM)); /* * Lookup the context range node the page belongs to. */ #ifdef VBOX_STRICT /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */ - PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys); + PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys); AssertMsgReturn(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND); Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0); @@ -2463,6 +2547,7 @@ VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uin Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); return VINF_SUCCESS; } +#endif /* !IEM_VERIFICATION_MODE_FULL */ /** @@ -2480,9 +2565,9 @@ VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys) PVMCPU pVCpu = VMMGetCpu(pVM); /* This currently only works in real mode, protected mode without paging or with nested paging. 
*/ - if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */ + if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */ || ( CPUMIsGuestInPagedProtectedMode(pVCpu) - && !HWACCMIsNestedPagingActive(pVM))) + && !HMIsNestedPagingActive(pVM))) return VINF_SUCCESS; /* ignore */ /* @@ -2490,7 +2575,7 @@ VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys) */ #ifdef VBOX_STRICT /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */ - PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys); + PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys); AssertMsgReturn(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND); Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0); @@ -2507,7 +2592,7 @@ VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys) AssertRC(rc); #ifdef VBOX_STRICT - if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)) + if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)) { uint32_t cb = pRange->cb; GCPhys = pRange->GCPhys; diff --git a/src/VBox/VMM/VMMAll/MMAll.cpp b/src/VBox/VMM/VMMAll/MMAll.cpp index b34d0c5e..88b9c4a2 100644 --- a/src/VBox/VMM/VMMAll/MMAll.cpp +++ b/src/VBox/VMM/VMMAll/MMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -24,6 +24,7 @@ #include <VBox/vmm/vmm.h> #include "MMInternal.h" #include <VBox/vmm/vm.h> +#include <VBox/vmm/hm.h> #include <VBox/log.h> #include <iprt/assert.h> #include <iprt/string.h> @@ -256,7 +257,7 @@ DECLINLINE(RTR0PTR) mmHyperLookupCalcR0(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_ if (pLookup->u.Locked.pvR0) return (RTR0PTR)((RTR0UINTPTR)pLookup->u.Locked.pvR0 + off); #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE - AssertMsg(!VMMIsHwVirtExtForced(pVM), ("%s\n", R3STRING(pLookup->pszDesc))); + AssertMsg(!HMIsEnabled(pVM), ("%s\n", R3STRING(pLookup->pszDesc))); #else AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc))); NOREF(pVM); #endif @@ -566,6 +567,10 @@ const char *mmGetTagName(MMTAG enmTag) TAG2STR(CFGM_STRING); TAG2STR(CFGM_USER); + TAG2STR(CPUM_CTX); + TAG2STR(CPUM_CPUID); + TAG2STR(CPUM_MSRS); + TAG2STR(CSAM); TAG2STR(CSAM_PATCH); @@ -639,7 +644,7 @@ const char *mmGetTagName(MMTAG enmTag) TAG2STR(VMM); - TAG2STR(HWACCM); + TAG2STR(HM); #undef TAG2STR diff --git a/src/VBox/VMM/VMMAll/MMAllHyper.cpp b/src/VBox/VMM/VMMAll/MMAllHyper.cpp index 4d12895b..b62dd6d7 100644 --- a/src/VBox/VMM/VMMAll/MMAllHyper.cpp +++ b/src/VBox/VMM/VMMAll/MMAllHyper.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -321,14 +321,13 @@ static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG e return VERR_MM_HYPER_NO_MEMORY; } + /** * Wrapper for mmHyperAllocInternal */ VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv) { - int rc; - - rc = mmHyperLock(pVM); + int rc = mmHyperLock(pVM); AssertRCReturn(rc, rc); LogFlow(("MMHyperAlloc %x align=%x tag=%s\n", cb, uAlignment, mmGetTagName(enmTag))); @@ -339,6 +338,19 @@ VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, return rc; } + +/** + * Duplicates a block of memory. 
+ */ +VMMDECL(int) MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv) +{ + int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv); + if (RT_SUCCESS(rc)) + memcpy(*ppv, pvSrc, cb); + return rc; +} + + /** * Allocates a chunk of memory from the specified heap. * The caller validates the parameters of this request. diff --git a/src/VBox/VMM/VMMAll/MMAllPagePool.cpp b/src/VBox/VMM/VMMAll/MMAllPagePool.cpp index 64244229..8f915b24 100644 --- a/src/VBox/VMM/VMMAll/MMAllPagePool.cpp +++ b/src/VBox/VMM/VMMAll/MMAllPagePool.cpp @@ -6,7 +6,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/PATMAll.cpp b/src/VBox/VMM/VMMAll/PATMAll.cpp index 60a9642d..ebd40c43 100644 --- a/src/VBox/VMM/VMMAll/PATMAll.cpp +++ b/src/VBox/VMM/VMMAll/PATMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -21,10 +21,8 @@ #define LOG_GROUP LOG_GROUP_PATM #include <VBox/vmm/patm.h> #include <VBox/vmm/cpum.h> -#include <VBox/dis.h> -#include <VBox/disopcode.h> #include <VBox/vmm/em.h> -#include <VBox/err.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/selm.h> #include <VBox/vmm/mm.h> #include "PATMInternal.h" @@ -32,8 +30,12 @@ #include <VBox/vmm/vmm.h> #include "PATMA.h" +#include <VBox/dis.h> +#include <VBox/disopcode.h> +#include <VBox/err.h> #include <VBox/log.h> #include <iprt/assert.h> +#include <iprt/string.h> /** @@ -46,26 +48,31 @@ * @param pCtxCore The cpu context core. * @see pg_raw */ -VMMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore) +VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore) { - bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip); + Assert(!HMIsEnabled(pVM)); /* * Currently we don't bother to check whether PATM is enabled or not. * For all cases where it isn't, IOPL will be safe and IF will be set. */ - register uint32_t efl = pCtxCore->eflags.u32; + uint32_t efl = pCtxCore->eflags.u32; CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK; - AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)); - AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip)); + AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), + ("X86_EFL_IF is clear and PATM is disabled! 
(eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", + pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, + pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)); + + AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || PATMIsPatchGCAddr(pVM, pCtxCore->eip), + ("fPIF=%d eip=%RRv\n", pVM->patm.s.CTXSUFF(pGCState)->fPIF, pCtxCore->eip)); efl &= ~PATM_VIRTUAL_FLAGS_MASK; efl |= X86_EFL_IF; pCtxCore->eflags.u32 = efl; #ifdef IN_RING3 -#ifdef PATM_EMULATE_SYSENTER +# ifdef PATM_EMULATE_SYSENTER PCPUMCTX pCtx; /* Check if the sysenter handler has changed. */ @@ -100,7 +107,7 @@ VMMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore) pVM->patm.s.pfnSysEnterPatchGC = 0; pVM->patm.s.pfnSysEnterGC = 0; } -#endif +# endif /* PATM_EMULATE_SYSENTER */ #endif } @@ -117,13 +124,15 @@ VMMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore) * @param rawRC Raw mode return code * @see @ref pg_raw */ -VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) +VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) { + Assert(!HMIsEnabled(pVM)); bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip); + /* * We will only be called if PATMRawEnter was previously called. */ - register uint32_t efl = pCtxCore->eflags.u32; + uint32_t efl = pCtxCore->eflags.u32; efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK); pCtxCore->eflags.u32 = efl; CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF; @@ -132,12 +141,11 @@ VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC)); #ifdef IN_RING3 - if ( (efl & X86_EFL_IF) - && fPatchCode - ) + if ( (efl & X86_EFL_IF) + && fPatchCode) { - if ( rawRC < VINF_PATM_LEAVE_RC_FIRST - || rawRC > VINF_PATM_LEAVE_RC_LAST) + if ( rawRC < VINF_PATM_LEAVE_RC_FIRST + || rawRC > VINF_PATM_LEAVE_RC_LAST) { /* * Golden rules: @@ -157,7 +165,7 @@ VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) Assert(enmState != PATMTRANS_OVERWRITTEN); if (enmState == PATMTRANS_SAFE) { - Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC)); + Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC)); Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp)); STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack); pCtxCore->eip = pOrgInstrGC; @@ -178,9 +186,13 @@ VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) } } } -#else /* !IN_RING3 */ - AssertMsgFailed(("!IN_RING3")); -#endif /* !IN_RING3 */ +#else /* !IN_RING3 */ + /* + * When leaving raw-mode state while IN_RC, it's generally for interpreting + * a single original guest instruction. + */ + AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtxCore->eip)); +#endif /* !IN_RING3 */ if (!fPatchCode) { @@ -209,8 +221,9 @@ VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC) * @param pVM Pointer to the VM. * @param pCtxCore The context core. 
*/ -VMMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore) +VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore) { + Assert(!HMIsEnabled(pVM)); uint32_t efl = pCtxCore->eflags.u32; efl &= ~PATM_VIRTUAL_FLAGS_MASK; efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK; @@ -225,8 +238,9 @@ VMMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore) * @param pCtxCore The context core. * @param efl The new EFLAGS value. */ -VMMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl) +VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl) { + Assert(!HMIsEnabled(pVM)); pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK; efl &= ~PATM_VIRTUAL_FLAGS_MASK; efl |= X86_EFL_IF; @@ -239,7 +253,7 @@ VMMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl) * @param pVM Pointer to the VM. * @param pAddrGC Guest context address */ -VMMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC) +VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC) { return ( PATMIsEnabled(pVM) && ((pAddrGC >= (RTRCPTR)pVM->patm.s.pPatchMemGC && pAddrGC < (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)))) ? true : false; @@ -251,8 +265,9 @@ VMMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC) * @returns VBox status code. * @param pVM Pointer to the VM. */ -VMMDECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM) +VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM) { + AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR); return pVM->patm.s.pGCStateGC; } @@ -262,6 +277,7 @@ VMMDECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM) * @returns VBox status code. * @param pVM Pointer to the VM. * @param pAddrGC Guest context address + * @internal */ VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC) { @@ -269,6 +285,46 @@ VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC) } /** + * Reads patch code. + * + * @returns + * @retval VINF_SUCCESS on success. + * @retval VERR_PATCH_NOT_FOUND if the request is entirely outside the patch + * code. + * + * @param pVM The cross context VM structure. + * @param GCPtrPatchCode The patch address to start reading at. + * @param pvDst Where to return the patch code. + * @param cbToRead Number of bytes to read. + * @param pcbRead Where to return the actual number of bytes we've + * read. Optional. + */ +VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead) +{ + /* Shortcut. */ + if (!PATMIsEnabled(pVM)) + return VERR_PATCH_NOT_FOUND; + Assert(!HMIsEnabled(pVM)); + + RTGCPTR offPatchedInstr = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC; + if (offPatchedInstr >= pVM->patm.s.cbPatchMem) + return VERR_PATCH_NOT_FOUND; + + uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchedInstr; + if (cbToRead > cbMaxRead) + cbToRead = cbMaxRead; + +#ifdef IN_RC + memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchedInstr, cbToRead); +#else + memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchedInstr, cbToRead); +#endif + if (pcbRead) + *pcbRead = cbToRead; + return VINF_SUCCESS; +} + +/** * Set parameters for pending MMIO patch operation * * @returns VBox status code. 
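A short sketch of a caller of the new PATMReadPatchCode API added above, for instance a disassembler byte fetcher that may land inside patch memory; the caller itself is hypothetical, the PATMIsPatchGCAddr/PATMReadPatchCode signatures and status codes are taken from the diff.

#include <VBox/vmm/patm.h>
#include <VBox/err.h>
#include <iprt/string.h>

static int hypotheticalReadCodeBytes(PVM pVM, RTRCUINTPTR GCPtrCode, uint8_t *pbDst, size_t cbToRead)
{
    if (!PATMIsPatchGCAddr(pVM, GCPtrCode))
        return VERR_PATCH_NOT_FOUND;    /* caller falls back to a normal guest-memory read */

    size_t cbRead = 0;
    int rc = PATMReadPatchCode(pVM, GCPtrCode, pbDst, cbToRead, &cbRead);
    if (RT_SUCCESS(rc) && cbRead < cbToRead)
        memset(&pbDst[cbRead], 0xcc, cbToRead - cbRead); /* pad a short read near the end of patch memory */
    return rc;
}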
@@ -276,10 +332,13 @@ VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC) * @param GCPhys MMIO physical address * @param pCachedData GC pointer to cached data */ -VMMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData) +VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData) { - pVM->patm.s.mmio.GCPhys = GCPhys; - pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData; + if (!HMIsEnabled(pVM)) + { + pVM->patm.s.mmio.GCPhys = GCPhys; + pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData; + } return VINF_SUCCESS; } @@ -291,8 +350,9 @@ VMMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData) * @returns false if it's disabled. * * @param pVM Pointer to the VM. + * @todo CPUM should wrap this, EM.cpp shouldn't call us. */ -VMMDECL(bool) PATMAreInterruptsEnabled(PVM pVM) +VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM) { PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)); @@ -307,11 +367,13 @@ VMMDECL(bool) PATMAreInterruptsEnabled(PVM pVM) * * @param pVM Pointer to the VM. * @param pCtxCore CPU context + * @todo CPUM should wrap this, EM.cpp shouldn't call us. */ -VMMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore) +VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore) { if (PATMIsEnabled(pVM)) { + Assert(!HMIsEnabled(pVM)); if (PATMIsPatchGCAddr(pVM, pCtxCore->eip)) return false; } @@ -326,7 +388,7 @@ VMMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore) * @param pInstrGC Guest context point to the instruction * */ -VMMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC) +PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC) { PPATMPATCHREC pRec; @@ -350,9 +412,10 @@ VMMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC) * @param pOpcode Original instruction opcode (out, optional) * @param pSize Original instruction size (out, optional) */ -VMMDECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize) +VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize) { PPATMPATCHREC pRec; + Assert(!HMIsEnabled(pVM)); pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC); if ( pRec @@ -379,6 +442,7 @@ VMMDECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu) { PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)); + AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE); if (pCpu->pCurInstr->uOpcode == OP_SYSENTER) { @@ -449,7 +513,7 @@ end: * @param pBranchTarget Original branch target * @param pRelBranchPatch Relative duplicated function address */ -VMMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch) +int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch) { PPATCHJUMPTABLE pJumpTable; @@ -515,7 +579,7 @@ VMMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR p * @param opcode DIS instruction opcode * @param fPatchFlags Patch flags */ -VMMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags) +const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags) { const char *pszInstr = NULL; @@ -625,9 +689,9 @@ VMMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t 
fPatchF break; case OP_MOV: if (fPatchFlags & PATMFL_IDTHANDLER) - { pszInstr = "mov (Int/Trap Handler)"; - } + else + pszInstr = "mov (cs)"; break; case OP_SYSENTER: pszInstr = "sysenter"; diff --git a/src/VBox/VMM/VMMAll/PDMAll.cpp b/src/VBox/VMM/VMMAll/PDMAll.cpp index b61beabe..29eefb92 100644 --- a/src/VBox/VMM/VMMAll/PDMAll.cpp +++ b/src/VBox/VMM/VMMAll/PDMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -51,13 +51,13 @@ VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt) /* * The local APIC has a higher priority than the PIC. */ - if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)) + if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)) { VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns)); Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)); uint32_t uTagSrc; - int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), &uTagSrc); + int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, &uTagSrc); AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i)); if (i >= 0) { @@ -71,7 +71,7 @@ VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt) /* * Check the PIC. */ - if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)) + if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)) { VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns)); @@ -199,31 +199,50 @@ VMM_INT_DECL(int) PDMIoApicSendMsi(PVM pVM, RTGCPHYS GCAddr, uint32_t uValue, ui /** - * Returns presence of an IO-APIC + * Returns the presence of an IO-APIC. * - * @returns VBox true if IO-APIC is present + * @returns VBox true if an IO-APIC is present. * @param pVM Pointer to the VM. */ -VMMDECL(bool) PDMHasIoApic(PVM pVM) +VMM_INT_DECL(bool) PDMHasIoApic(PVM pVM) { return pVM->pdm.s.IoApic.CTX_SUFF(pDevIns) != NULL; } /** + * Returns the presence of a Local APIC. + * + * @returns VBox true if a Local APIC is present. + * @param pVM Pointer to the VM. + */ +VMM_INT_DECL(bool) PDMHasApic(PVM pVM) +{ + return pVM->pdm.s.Apic.CTX_SUFF(pDevIns) != NULL; +} + + +/** * Set the APIC base. * * @returns VBox status code. - * @param pVM Pointer to the VM. + * @param pVM Pointer to the VMCPU. * @param u64Base The new base. */ -VMMDECL(int) PDMApicSetBase(PVM pVM, uint64_t u64Base) +VMMDECL(int) PDMApicSetBase(PVMCPU pVCpu, uint64_t u64Base) { + PVM pVM = pVCpu->CTX_SUFF(pVM); if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) { Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase)); pdmLock(pVM); - pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), u64Base); + pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, u64Base); + + /* Update CPUM's copy of the APIC base. */ + PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); + Assert(pCtx); + pCtx->msrApicBase = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu); + pdmUnlock(pVM); return VINF_SUCCESS; } @@ -232,19 +251,23 @@ VMMDECL(int) PDMApicSetBase(PVM pVM, uint64_t u64Base) /** - * Get the APIC base. + * Get the APIC base from the APIC device. This is slow and involves + * taking the PDM lock, this is currently only used by CPUM to cache the APIC + * base once (during init./load state), all other callers should use + * PDMApicGetBase() and not this function. 
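A sketch of the new per-VCPU APIC base setter in use, e.g. from an MSR write path; the wrapper name is hypothetical, and it is assumed the usual PDM API header declares PDMApicSetBase. As the hunk above shows, the setter now also refreshes CPUM's cached msrApicBase under the PDM lock.

#include <VBox/vmm/pdmapi.h>

static int hypotheticalHandleApicBaseWrmsr(PVMCPU pVCpu, uint64_t uValue)
{
    /* Per-VCPU call; CPUM's cached copy of the APIC base is updated as a side effect. */
    return PDMApicSetBase(pVCpu, uValue);
}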
* * @returns VBox status code. - * @param pVM Pointer to the VM. + * @param pVM Pointer to the VMCPU. * @param pu64Base Where to store the APIC base. */ -VMMDECL(int) PDMApicGetBase(PVM pVM, uint64_t *pu64Base) +VMMDECL(int) PDMApicGetBase(PVMCPU pVCpu, uint64_t *pu64Base) { + PVM pVM = pVCpu->CTX_SUFF(pVM); if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) { Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)); pdmLock(pVM); - *pu64Base = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns)); + *pu64Base = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu); pdmUnlock(pVM); return VINF_SUCCESS; } @@ -257,16 +280,18 @@ VMMDECL(int) PDMApicGetBase(PVM pVM, uint64_t *pu64Base) * Check if the APIC has a pending interrupt/if a TPR change would active one. * * @returns VINF_SUCCESS or VERR_PDM_NO_APIC_INSTANCE. - * @param pDevIns Device instance of the APIC. + * @param pVCpu Pointer to the VMCPU. * @param pfPending Pending state (out). */ -VMMDECL(int) PDMApicHasPendingIrq(PVM pVM, bool *pfPending) +VMM_INT_DECL(int) PDMApicHasPendingIrq(PVMCPU pVCpu, bool *pfPending) { + PVM pVM = pVCpu->CTX_SUFF(pVM); if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) { Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetTPR)); pdmLock(pVM); - *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns)); + *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, + NULL /* pu8PendingIrq */); pdmUnlock(pVM); return VINF_SUCCESS; } @@ -299,23 +324,30 @@ VMMDECL(int) PDMApicSetTPR(PVMCPU pVCpu, uint8_t u8TPR) /** * Get the TPR (task priority register). * - * @returns The current TPR. + * @returns VINF_SUCCESS or VERR_PDM_NO_APIC_INSTANCE. * @param pVCpu Pointer to the VMCPU. * @param pu8TPR Where to store the TRP. - * @param pfPending Pending interrupt state (out). -*/ -VMMDECL(int) PDMApicGetTPR(PVMCPU pVCpu, uint8_t *pu8TPR, bool *pfPending) + * @param pfPending Pending interrupt state (out, optional). + * @param pu8PendingIrq Where to store the highest-priority pending IRQ + * (out, optional). + * + * @remarks No-long-jump zone!!! + */ +VMMDECL(int) PDMApicGetTPR(PVMCPU pVCpu, uint8_t *pu8TPR, bool *pfPending, uint8_t *pu8PendingIrq) { - PVM pVM = pVCpu->CTX_SUFF(pVM); - if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) + PVM pVM = pVCpu->CTX_SUFF(pVM); + PPDMDEVINS pApicIns = pVM->pdm.s.Apic.CTX_SUFF(pDevIns); + if (pApicIns) { - Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)); - /* We don't acquire the PDM lock here as we're just reading information. Doing so causes massive - * contention as this function is called very often by each and every VCPU. + /* + * Note! We don't acquire the PDM lock here as we're just reading + * information. Doing so causes massive contention as this + * function is called very often by each and every VCPU. */ - *pu8TPR = pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu); + Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)); + *pu8TPR = pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)(pApicIns, pVCpu->idCpu); if (pfPending) - *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns)); + *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pApicIns, pVCpu->idCpu, pu8PendingIrq); return VINF_SUCCESS; } *pu8TPR = 0; @@ -332,7 +364,7 @@ VMMDECL(int) PDMApicGetTPR(PVMCPU pVCpu, uint8_t *pu8TPR, bool *pfPending) * @param u32Reg MSR to write. * @param u64Value Value to write. 
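A sketch of a caller adapted to the extended PDMApicGetTPR signature above, which now returns the highest-priority pending IRQ as well and is documented as lock-free; the helper name is hypothetical, the signature comes from the diff.

#include <VBox/vmm/pdmapi.h>
#include <VBox/err.h>

static bool hypotheticalHasPendingApicIrq(PVMCPU pVCpu)
{
    uint8_t u8Tpr        = 0;
    bool    fPending     = false;
    uint8_t u8PendingIrq = 0;
    int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, &u8PendingIrq); /* no PDM lock taken */
    NOREF(u8Tpr); NOREF(u8PendingIrq);
    return RT_SUCCESS(rc) && fPending;
}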
*/ -VMMDECL(int) PDMApicWriteMSR(PVM pVM, VMCPUID iCpu, uint32_t u32Reg, uint64_t u64Value) +VMM_INT_DECL(int) PDMApicWriteMSR(PVM pVM, VMCPUID iCpu, uint32_t u32Reg, uint64_t u64Value) { if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) { @@ -352,7 +384,7 @@ VMMDECL(int) PDMApicWriteMSR(PVM pVM, VMCPUID iCpu, uint32_t u32Reg, uint64_t u6 * @param u32Reg MSR to read. * @param pu64Value Value read. */ -VMMDECL(int) PDMApicReadMSR(PVM pVM, VMCPUID iCpu, uint32_t u32Reg, uint64_t *pu64Value) +VMM_INT_DECL(int) PDMApicReadMSR(PVM pVM, VMCPUID iCpu, uint32_t u32Reg, uint64_t *pu64Value) { if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns)) { @@ -416,12 +448,12 @@ void pdmUnlock(PVM pVM) * @param pv Ring-3 pointer. * @param pGCPhys GC phys address (out). */ -VMMDECL(int) PDMVMMDevHeapR3ToGCPhys(PVM pVM, RTR3PTR pv, RTGCPHYS *pGCPhys) +VMM_INT_DECL(int) PDMVmmDevHeapR3ToGCPhys(PVM pVM, RTR3PTR pv, RTGCPHYS *pGCPhys) { /* Don't assert here as this is called before we can catch ring-0 assertions. */ if (RT_UNLIKELY((RTR3UINTPTR)pv - (RTR3UINTPTR)pVM->pdm.s.pvVMMDevHeap >= pVM->pdm.s.cbVMMDevHeap)) { - Log(("PDMVMMDevHeapR3ToGCPhys: pv=%p pvVMMDevHeap=%p cbVMMDevHeap=%#x\n", + Log(("PDMVmmDevHeapR3ToGCPhys: pv=%p pvVMMDevHeap=%p cbVMMDevHeap=%#x\n", pv, pVM->pdm.s.pvVMMDevHeap, pVM->pdm.s.cbVMMDevHeap)); return VERR_PDM_DEV_HEAP_R3_TO_GCPHYS; } @@ -436,7 +468,7 @@ VMMDECL(int) PDMVMMDevHeapR3ToGCPhys(PVM pVM, RTR3PTR pv, RTGCPHYS *pGCPhys) * @returns dev heap enabled status (true/false) * @param pVM Pointer to the VM. */ -VMMDECL(bool) PDMVMMDevHeapIsEnabled(PVM pVM) +VMM_INT_DECL(bool) PDMVmmDevHeapIsEnabled(PVM pVM) { return (pVM->pdm.s.pvVMMDevHeap != NULL); } diff --git a/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp b/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp index 55099ecb..db903792 100644 --- a/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp +++ b/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @@ -1,10 +1,10 @@ /* $Id: PDMAllCritSect.cpp $ */ /** @file - * PDM - Critical Sections, All Contexts. + * PDM - Write-Only Critical Section, All Contexts. */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -26,7 +26,7 @@ #include <VBox/vmm/vmm.h> #include <VBox/vmm/vm.h> #include <VBox/err.h> -#include <VBox/vmm/hwaccm.h> +#include <VBox/vmm/hm.h> #include <VBox/log.h> #include <iprt/asm.h> @@ -111,7 +111,9 @@ DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHR /** * Deals with the contended case in ring-3 and ring-0. * - * @returns VINF_SUCCESS or VERR_SEM_DESTROYED. + * @retval VINF_SUCCESS on success. + * @retval VERR_SEM_DESTROYED if destroyed. + * * @param pCritSect The critsect. * @param hNativeSelf The native thread handle. */ @@ -145,25 +147,53 @@ static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD # endif for (;;) { -# ifdef PDMCRITSECT_STRICT + /* + * Do the wait. + * + * In ring-3 this gets cluttered by lock validation and thread state + * maintainence. + * + * In ring-0 we have to deal with the possibility that the thread has + * been signalled and the interruptible wait function returning + * immediately. In that case we do normal R0/RC rcBusy handling. 
+ */ +# ifdef IN_RING3 +# ifdef PDMCRITSECT_STRICT int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING), RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true); if (RT_FAILURE(rc9)) return rc9; -# elif defined(IN_RING3) +# else RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true); -# endif +# endif int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT); -# ifdef IN_RING3 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT); -# endif +# else /* IN_RING0 */ + int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT); +# endif /* IN_RING0 */ + /* + * Deal with the return code and critsect destruction. + */ if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC)) return VERR_SEM_DESTROYED; if (rc == VINF_SUCCESS) return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos); AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc)); + +# ifdef IN_RING0 + /* Something is pending (signal, APC, debugger, whatever), just go back + to ring-3 so the kernel can deal with it when leaving kernel context. + + Note! We've incremented cLockers already and cannot safely decrement + it without creating a race with PDMCritSectLeave, resulting in + spurious wakeups. */ + PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL); + AssertRC(rc); +# endif } /* won't get here */ } @@ -175,7 +205,8 @@ static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD * * @returns VINF_SUCCESS if entered successfully. * @returns rcBusy when encountering a busy critical section in GC/R0. - * @returns VERR_SEM_DESTROYED if the critical section is dead. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The PDM critical section to enter. * @param rcBusy The status code to return when we're in GC or R0 @@ -253,7 +284,7 @@ DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRT * use PDMCritSectTryEnter. */ { /* - * Leave HWACCM context while waiting if necessary. + * Leave HM context while waiting if necessary. */ int rc; if (RTThreadPreemptIsEnabled(NIL_RTTHREAD)) @@ -266,13 +297,13 @@ DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRT STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000); PVM pVM = pCritSect->s.CTX_SUFF(pVM); PVMCPU pVCpu = VMMGetCpu(pVM); - HWACCMR0Leave(pVM, pVCpu); + HMR0Leave(pVM, pVCpu); RTThreadPreemptRestore(NIL_RTTHREAD, ????); rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos); RTThreadPreemptDisable(NIL_RTTHREAD, ????); - HWACCMR0Enter(pVM, pVCpu); + HMR0Enter(pVM, pVCpu); } return rc; } @@ -311,11 +342,12 @@ DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRT * Enters a PDM critical section. * * @returns VINF_SUCCESS if entered successfully. - * @returns rcBusy when encountering a busy critical section in GC/R0. - * @returns VERR_SEM_DESTROYED if the critical section is dead. + * @returns rcBusy when encountering a busy critical section in RC/R0. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The PDM critical section to enter. 
- * @param rcBusy The status code to return when we're in GC or R0 + * @param rcBusy The status code to return when we're in RC or R0 * and the section is busy. Pass VINF_SUCCESS to * acquired the critical section thru a ring-3 * call if necessary. @@ -335,11 +367,12 @@ VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy) * Enters a PDM critical section, with location information for debugging. * * @returns VINF_SUCCESS if entered successfully. - * @returns rcBusy when encountering a busy critical section in GC/R0. - * @returns VERR_SEM_DESTROYED if the critical section is dead. + * @returns rcBusy when encountering a busy critical section in RC/R0. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The PDM critical section to enter. - * @param rcBusy The status code to return when we're in GC or R0 + * @param rcBusy The status code to return when we're in RC or R0 * and the section is busy. Pass VINF_SUCCESS to * acquired the critical section thru a ring-3 * call if necessary. @@ -369,7 +402,8 @@ VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTP * @retval VINF_SUCCESS on success. * @retval VERR_SEM_BUSY if the critsect was owned. * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) - * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The critical section. */ @@ -424,7 +458,8 @@ static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos * @retval VINF_SUCCESS on success. * @retval VERR_SEM_BUSY if the critsect was owned. * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) - * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The critical section. */ @@ -445,7 +480,8 @@ VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect) * @retval VINF_SUCCESS on success. * @retval VERR_SEM_BUSY if the critsect was owned. * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) - * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The critical section. * @param uId Some kind of locking location ID. Typically a @@ -474,7 +510,8 @@ VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, R * * @returns VINF_SUCCESS if entered successfully. * @returns rcBusy when encountering a busy critical section in GC/R0. - * @returns VERR_SEM_DESTROYED if the critical section is dead. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. * * @param pCritSect The PDM critical section to enter. * @param fCallRing3 Whether this is a VMMRZCallRing3()request. 
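A sketch of the rcBusy convention these doc comments describe: in R0/RC a contended section makes PDMCritSectEnter return rcBusy so the access can be retried in ring-3, while in ring-3 the call simply blocks. The handler is hypothetical and VINF_IOM_R3_MMIO_WRITE is assumed here as the usual IOM deferral status; only the PDMCritSectEnter/PDMCritSectLeave signatures come from the diff.

#include <VBox/vmm/pdmcritsect.h>
#include <VBox/err.h>

static int hypotheticalMmioWriteHandler(PPDMCRITSECT pCritSect, uint32_t uValue)
{
    int rc = PDMCritSectEnter(pCritSect, VINF_IOM_R3_MMIO_WRITE /* rcBusy */);
    if (rc == VINF_IOM_R3_MMIO_WRITE)
        return rc;                      /* contended in R0/RC: redo the whole access in ring-3 */
    if (RT_FAILURE(rc))
        return rc;                      /* e.g. VERR_SEM_DESTROYED */

    NOREF(uValue);                      /* ... update device state here ... */

    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}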
@@ -622,8 +659,8 @@ VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect) PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++; LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect)); - AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves)); - pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect); + AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves)); + pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect); VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves); @@ -635,35 +672,6 @@ VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect) } -#if defined(IN_RING3) || defined(IN_RING0) -/** - * Process the critical sections queued for ring-3 'leave'. - * - * @param pVCpu Pointer to the VMCPU. - */ -VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu) -{ - Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0); - - const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves; - for (RTUINT i = 0; i < c; i++) - { -# ifdef IN_RING3 - PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i]; -# else - PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]); -# endif - - PDMCritSectLeave(pCritSect); - LogFlow(("PDMR3CritSectFF: %p\n", pCritSect)); - } - - pVCpu->pdm.s.cQueuedCritSectLeaves = 0; - VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT); -} -#endif /* IN_RING3 || IN_RING0 */ - - /** * Checks the caller is the owner of the critical section. * @@ -747,3 +755,4 @@ VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect) { return RTCritSectGetRecursion(&pCritSect->s.Core); } + diff --git a/src/VBox/VMM/VMMAll/PDMAllCritSectBoth.cpp b/src/VBox/VMM/VMMAll/PDMAllCritSectBoth.cpp new file mode 100644 index 00000000..1ea49568 --- /dev/null +++ b/src/VBox/VMM/VMMAll/PDMAllCritSectBoth.cpp @@ -0,0 +1,97 @@ +/* $Id: PDMAllCritSectBoth.cpp $ */ +/** @file + * PDM - Code Common to Both Critical Section Types, All Contexts. + */ + +/* + * Copyright (C) 2006-2013 Oracle Corporation + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License (GPL) as published by the Free Software + * Foundation, in version 2 as it comes in the "COPYING" file of the + * VirtualBox OSE distribution. VirtualBox OSE is distributed in the + * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. + */ + + +/******************************************************************************* +* Header Files * +*******************************************************************************/ +#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT +#include "PDMInternal.h" +#include <VBox/vmm/pdmcritsect.h> +#include <VBox/vmm/pdmcritsectrw.h> +#include <VBox/vmm/vm.h> +#include <VBox/err.h> + +#include <VBox/log.h> +#include <iprt/assert.h> +#include <iprt/asm.h> + + +#if defined(IN_RING3) || defined(IN_RING0) +/** + * Process the critical sections (both types) queued for ring-3 'leave'. + * + * @param pVCpu Pointer to the VMCPU. + */ +VMM_INT_DECL(void) PDMCritSectBothFF(PVMCPU pVCpu) +{ + uint32_t i; + Assert( pVCpu->pdm.s.cQueuedCritSectLeaves > 0 + || pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves > 0 + || pVCpu->pdm.s.cQueuedCritSectRwExclLeaves > 0); + + /* Shared leaves. 
*/ + i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves; + pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves = 0; + while (i-- > 0) + { +# ifdef IN_RING3 + PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]; +# else + PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), + pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]); +# endif + + pdmCritSectRwLeaveSharedQueued(pCritSectRw); + LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw)); + } + + /* Last, exclusive leaves. */ + i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves; + pVCpu->pdm.s.cQueuedCritSectRwExclLeaves = 0; + while (i-- > 0) + { +# ifdef IN_RING3 + PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i]; +# else + PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), + pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i]); +# endif + + pdmCritSectRwLeaveExclQueued(pCritSectRw); + LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw)); + } + + /* Normal leaves. */ + i = pVCpu->pdm.s.cQueuedCritSectLeaves; + pVCpu->pdm.s.cQueuedCritSectLeaves = 0; + while (i-- > 0) + { +# ifdef IN_RING3 + PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectLeaves[i]; +# else + PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectLeaves[i]); +# endif + + PDMCritSectLeave(pCritSect); + LogFlow(("PDMR3CritSectFF: %p\n", pCritSect)); + } + + VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT); +} +#endif /* IN_RING3 || IN_RING0 */ + diff --git a/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp b/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp new file mode 100644 index 00000000..3f751a1b --- /dev/null +++ b/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp @@ -0,0 +1,1432 @@ +/* $Id: PDMAllCritSectRw.cpp $ */ +/** @file + * IPRT - Read/Write Critical Section, Generic. + */ + +/* + * Copyright (C) 2009-2013 Oracle Corporation + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License (GPL) as published by the Free Software + * Foundation, in version 2 as it comes in the "COPYING" file of the + * VirtualBox OSE distribution. VirtualBox OSE is distributed in the + * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. + */ + + +/******************************************************************************* +* Header Files * +*******************************************************************************/ +#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT +#include "PDMInternal.h" +#include <VBox/vmm/pdmcritsectrw.h> +#include <VBox/vmm/mm.h> +#include <VBox/vmm/vmm.h> +#include <VBox/vmm/vm.h> +#include <VBox/err.h> +#include <VBox/vmm/hm.h> + +#include <VBox/log.h> +#include <iprt/asm.h> +#include <iprt/asm-amd64-x86.h> +#include <iprt/assert.h> +#ifdef IN_RING3 +# include <iprt/lockvalidator.h> +# include <iprt/semaphore.h> +#endif +#if defined(IN_RING3) || defined(IN_RING0) +# include <iprt/thread.h> +#endif + + +/******************************************************************************* +* Defined Constants And Macros * +*******************************************************************************/ +/** The number loops to spin for shared access in ring-3. */ +#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20 +/** The number loops to spin for shared access in ring-0. 
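A sketch of where PDMCritSectBothFF fits: R0/RC code queues leaves and raises VMCPU_FF_PDM_CRITSECT plus VMCPU_FF_TO_R3, and the ring-3 forced-action processing flushes them. The loop shape below is hypothetical and it is assumed the PDM header declares PDMCritSectBothFF; the force-flag names and the flush order are taken from the function above.

#include <VBox/vmm/vm.h>    /* VMCPU_FF_* */

static void hypotheticalProcessForcedActions(PVMCPU pVCpu)
{
    /* Queued leaves are flushed here: r/w shared first, then r/w exclusive, then normal sections. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
        PDMCritSectBothFF(pVCpu);
}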
*/ +#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128 +/** The number loops to spin for shared access in the raw-mode context. */ +#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128 + +/** The number loops to spin for exclusive access in ring-3. */ +#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20 +/** The number loops to spin for exclusive access in ring-0. */ +#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256 +/** The number loops to spin for exclusive access in the raw-mode context. */ +#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256 + + +/* Undefine the automatic VBOX_STRICT API mappings. */ +#undef PDMCritSectRwEnterExcl +#undef PDMCritSectRwTryEnterExcl +#undef PDMCritSectRwEnterShared +#undef PDMCritSectRwTryEnterShared + + +/** + * Gets the ring-3 native thread handle of the calling thread. + * + * @returns native thread handle (ring-3). + * @param pThis The read/write critical section. This is only used in + * R0 and RC. + */ +DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis) +{ +#ifdef IN_RING3 + NOREF(pThis); + RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); +#else + AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic), + NIL_RTNATIVETHREAD); + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD); +#endif + return hNativeSelf; +} + + + + + +#ifdef IN_RING3 +/** + * Changes the lock validator sub-class of the read/write critical section. + * + * It is recommended to try make sure that nobody is using this critical section + * while changing the value. + * + * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returns if the + * lock validator isn't compiled in or either of the parameters are + * invalid. + * @param pThis Pointer to the read/write critical section. + * @param uSubClass The new sub-class value. + */ +VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass) +{ + AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID); +# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID); + + RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass); + return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass); +# else + NOREF(uSubClass); + return RTLOCKVAL_SUB_CLASS_INVALID; +# endif +} +#endif /* IN_RING3 */ + + +#ifdef IN_RING0 +/** + * Go back to ring-3 so the kernel can do signals, APCs and other fun things. + * + * @param pThis Pointer to the read/write critical section. + */ +static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis) +{ + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL); + AssertRC(rc); +} +#endif /* IN_RING0 */ + + +/** + * Worker that enters a read/write critical section with shard access. + * + * @returns VBox status code. + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The busy return code for ring-0 and ring-3. + * @param fTryOnly Only try enter it, don't wait. + * @param pSrcPos The source position. (Can be NULL.) + * @param fNoVal No validation records. 
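The enter/leave workers below manipulate a single packed 64-bit state word; the hypothetical debug helper below only decodes it, using the same RTCSRW_* masks and shifts the workers use (assumed to come from the IPRT read/write critsect definitions these files pull in).

static bool hypotheticalRwStateIsReadDirection(uint64_t u64State, uint64_t *pcReaders, uint64_t *pcWaitingReaders)
{
    *pcReaders        = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    *pcWaitingReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    /* A non-zero writer count (RTCSRW_CNT_WR_MASK) keeps new readers out unless the
       writer itself is doing a read recursion; see pdmCritSectRwEnterShared below. */
    return (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
}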
+ */ +static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal) +{ + /* + * Validate input. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED); + +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); + if (!fTryOnly) + { + int rc9; + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis)) + rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT); + else + rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + + /* + * Get cracking... + */ + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t u64OldState = u64State; + + for (;;) + { + if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT)) + { + /* It flows in the right direction, try follow it before it changes. */ + uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; + c++; + Assert(c < RTCSRW_CNT_MASK / 2); + u64State &= ~RTCSRW_CNT_RD_MASK; + u64State |= c << RTCSRW_CNT_RD_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos); +#endif + break; + } + } + else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0) + { + /* Wrong direction, but we're alone here and can simply try switch the direction. */ + u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK); + u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT); + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + Assert(!pThis->s.Core.fNeedReset); +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos); +#endif + break; + } + } + else + { + /* Is the writer perhaps doing a read recursion? */ + RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis); + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + if (hNativeSelf == hNativeWriter) + { +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + { + int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2); + ASMAtomicIncU32(&pThis->s.Core.cWriterReads); + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared)); + return VINF_SUCCESS; /* don't break! */ + } + + /* + * If we're only trying, return already. + */ + if (fTryOnly) + { + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared)); + return VERR_SEM_BUSY; + } + +#if defined(IN_RING3) || defined(IN_RING0) +# ifdef IN_RING0 + if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD) + && ASMIntAreEnabled()) +# endif + { + /* + * Add ourselves to the queue and wait for the direction to change. 
+ */ + uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; + c++; + Assert(c < RTCSRW_CNT_MASK / 2); + + uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; + cWait++; + Assert(cWait <= c); + Assert(cWait < RTCSRW_CNT_MASK / 2); + + u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK); + u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT); + + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + for (uint32_t iLoop = 0; ; iLoop++) + { + int rc; +# ifdef IN_RING3 +# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true, + RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false); + if (RT_SUCCESS(rc)) +# else + RTTHREAD hThreadSelf = RTThreadSelf(); + RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false); +# endif +# endif + { + for (;;) + { + rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession, + (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, + RT_INDEFINITE_WAIT); + if ( rc != VERR_INTERRUPTED + || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + break; +# ifdef IN_RING0 + pdmR0CritSectRwYieldToRing3(pThis); +# endif + } +# ifdef IN_RING3 + RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ); +# endif + if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + return VERR_SEM_DESTROYED; + } + if (RT_FAILURE(rc)) + { + /* Decrement the counts and return the error. */ + for (;;) + { + u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0); + c--; + cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0); + cWait--; + u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK); + u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT); + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + return rc; + } + + Assert(pThis->s.Core.fNeedReset); + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT)) + break; + AssertMsg(iLoop < 1, ("%u\n", iLoop)); + } + + /* Decrement the wait count and maybe reset the semaphore (if we're last). */ + for (;;) + { + u64OldState = u64State; + + cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; + Assert(cWait > 0); + cWait--; + u64State &= ~RTCSRW_WAIT_CNT_RD_MASK; + u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT; + + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + if (cWait == 0) + { + if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false)) + { + int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession, + (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead); + AssertRCReturn(rc, rc); + } + } + break; + } + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + } + +# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos); +# endif + break; + } + } +#endif /* IN_RING3 || IN_RING3 */ +#ifndef IN_RING3 +# ifdef IN_RING0 + else +# endif + { + /* + * We cannot call SUPSemEventMultiWaitNoResume in this context. Go + * back to ring-3 and do it there or return rcBusy. 
+ */ + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared)); + if (rcBusy == VINF_SUCCESS) + { + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + /** @todo Should actually do this in via VMMR0.cpp instead of going all the way + * back to ring-3. Goes for both kind of crit sects. */ + return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis)); + } + return rcBusy; + } +#endif /* !IN_RING3 */ + } + + if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + return VERR_SEM_DESTROYED; + + ASMNopPause(); + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + u64OldState = u64State; + } + + /* got it! */ + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared)); + Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT)); + return VINF_SUCCESS; + +} + + +/** + * Enter a critical section with shared (read) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The status code to return when we're in RC or R0 and the + * section is busy. Pass VINF_SUCCESS to acquired the + * critical section thru a ring-3 call if necessary. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared, + * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared, + * RTCritSectRwEnterShared. + */ +VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); + return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Enter a critical section with shared (read) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The status code to return when we're in RC or R0 and the + * section is busy. Pass VINF_SUCCESS to acquired the + * critical section thru a ring-3 call if necessary. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared, + * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared, + * RTCritSectRwEnterSharedDebug. 
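A sketch of the intended read-side usage of PDMCritSectRwEnterShared as documented above; the lookup function is hypothetical, the enter/leave signatures come from the diff. Passing VINF_SUCCESS as rcBusy means a contended R0/RC caller acquires the section through a ring-3 call instead of failing.

#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/err.h>

static int hypotheticalLookupUnderSharedLock(PPDMCRITSECTRW pThis, uint32_t volatile *puValue, uint32_t *puOut)
{
    int rc = PDMCritSectRwEnterShared(pThis, VINF_SUCCESS /* rcBusy: go via ring-3 if contended */);
    if (RT_FAILURE(rc))
        return rc;              /* e.g. VERR_SEM_DESTROYED */

    *puOut = *puValue;          /* read-only work under the shared lock */

    return PDMCritSectRwLeaveShared(pThis);
}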
+ */ +VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); + return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Try enter a critical section with shared (read) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval VERR_SEM_BUSY if the critsect was owned. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared, + * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared, + * RTCritSectRwTryEnterShared. + */ +VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); + return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Try enter a critical section with shared (read) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval VERR_SEM_BUSY if the critsect was owned. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared, + * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared, + * RTCritSectRwTryEnterSharedDebug. + */ +VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); + return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +#ifdef IN_RING3 +/** + * Enters a PDM read/write critical section with shared (read) access. + * + * @returns VINF_SUCCESS if entered successfully. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param fCallRing3 Whether this is a VMMRZCallRing3()request. + */ +VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3) +{ + return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3); +} +#endif + + +/** + * Leave a critical section held with shared access. 
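For contrast, a sketch of the try variant in a path where skipping the work is better than blocking or dropping to ring-3, for example a statistics snapshot; the sampler is hypothetical, PDMCritSectRwTryEnterShared and PDMCritSectRwLeaveShared are from the diff.

static uint32_t hypotheticalSampleCounter(PPDMCRITSECTRW pThis, uint32_t volatile *puCounter)
{
    uint32_t uSample = 0;
    if (PDMCritSectRwTryEnterShared(pThis) == VINF_SUCCESS)
    {
        uSample = *puCounter;
        PDMCritSectRwLeaveShared(pThis);
    }
    return uSample;             /* 0 when the section was busy or destroyed */
}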
+ * + * @returns VBox status code. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * @param pThis Pointer to the read/write critical section. + * @param fNoVal No validation records (i.e. queued release). + * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared, + * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug, + * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared. + */ +static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED); + + /* + * Check the direction and take action accordingly. + */ + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t u64OldState = u64State; + if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT)) + { +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (fNoVal) + Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD)); + else + { + int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + for (;;) + { + uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; + AssertReturn(c > 0, VERR_NOT_OWNER); + c--; + + if ( c > 0 + || (u64State & RTCSRW_CNT_WR_MASK) == 0) + { + /* Don't change the direction. */ + u64State &= ~RTCSRW_CNT_RD_MASK; + u64State |= c << RTCSRW_CNT_RD_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + else + { +#if defined(IN_RING3) || defined(IN_RING0) +# ifdef IN_RING0 + if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD) + && ASMIntAreEnabled()) +# endif + { + /* Reverse the direction and signal the writer threads. */ + u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK); + u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite); + AssertRC(rc); + break; + } + } +#endif /* IN_RING3 || IN_RING0 */ +#ifndef IN_RING3 +# ifdef IN_RING0 + else +# endif + { + /* Queue the exit request (ring-3). 
*/ + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++; + LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State)); + AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves)); + pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis); + VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); + VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); + STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves); + STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared); + break; + } +#endif + } + + ASMNopPause(); + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + u64OldState = u64State; + } + } + else + { + RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis); + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER); + AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER); +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + { + int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core); + if (RT_FAILURE(rc)) + return rc; + } +#endif + ASMAtomicDecU32(&pThis->s.Core.cWriterReads); + } + + return VINF_SUCCESS; +} + +/** + * Leave a critical section held with shared access. + * + * @returns VBox status code. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared, + * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug, + * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared. + */ +VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis) +{ + return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/); +} + + +#if defined(IN_RING3) || defined(IN_RING0) +/** + * PDMCritSectBothFF interface. + * + * @param pThis Pointer to the read/write critical section. + */ +void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis) +{ + pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/); +} +#endif + + +/** + * Worker that enters a read/write critical section with exclusive access. + * + * @returns VBox status code. + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The busy return code for ring-0 and ring-3. + * @param fTryOnly Only try enter it, don't wait. + * @param pSrcPos The source position. (Can be NULL.) + * @param fNoVal No validation records. + */ +static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal) +{ + /* + * Validate input. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED); + +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + RTTHREAD hThreadSelf = NIL_RTTHREAD; + if (!fTryOnly) + { + hThreadSelf = RTThreadSelfAutoAdopt(); + int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + + /* + * Check if we're already the owner and just recursing. 
+ */ + RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis); + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + if (hNativeSelf == hNativeWriter) + { + Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)); +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + { + int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2); + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl)); + ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions); + return VINF_SUCCESS; + } + + /* + * Get cracking. + */ + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t u64OldState = u64State; + + for (;;) + { + if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) + || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0) + { + /* It flows in the right direction, try follow it before it changes. */ + uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; + c++; + Assert(c < RTCSRW_CNT_MASK / 2); + u64State &= ~RTCSRW_CNT_WR_MASK; + u64State |= c << RTCSRW_CNT_WR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0) + { + /* Wrong direction, but we're alone here and can simply try switch the direction. */ + u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK); + u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT); + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + else if (fTryOnly) + { + /* Wrong direction and we're not supposed to wait, just return. */ + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); + return VERR_SEM_BUSY; + } + else + { + /* Add ourselves to the write count and break out to do the wait. */ + uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; + c++; + Assert(c < RTCSRW_CNT_MASK / 2); + u64State &= ~RTCSRW_CNT_WR_MASK; + u64State |= c << RTCSRW_CNT_WR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + + if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + return VERR_SEM_DESTROYED; + + ASMNopPause(); + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + u64OldState = u64State; + } + + /* + * If we're in write mode now try grab the ownership. Play fair if there + * are threads already waiting. + */ + bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) +#if defined(IN_RING3) + && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1 + || fTryOnly) +#endif + ; + if (fDone) + ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone); + if (!fDone) + { + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); + +#if defined(IN_RING3) || defined(IN_RING0) + if ( !fTryOnly +# ifdef IN_RING0 + && RTThreadPreemptIsEnabled(NIL_RTTHREAD) + && ASMIntAreEnabled() +# endif + ) + { + + /* + * Wait for our turn. 
+ */ + for (uint32_t iLoop = 0; ; iLoop++) + { + int rc; +# ifdef IN_RING3 +# ifdef PDMCRITSECTRW_STRICT + if (hThreadSelf == NIL_RTTHREAD) + hThreadSelf = RTThreadSelfAutoAdopt(); + rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true, + RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false); + if (RT_SUCCESS(rc)) +# else + RTTHREAD hThreadSelf = RTThreadSelf(); + RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false); +# endif +# endif + { + for (;;) + { + rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession, + (SUPSEMEVENT)pThis->s.Core.hEvtWrite, + RT_INDEFINITE_WAIT); + if ( rc != VERR_INTERRUPTED + || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + break; +# ifdef IN_RING0 + pdmR0CritSectRwYieldToRing3(pThis); +# endif + } +# ifdef IN_RING3 + RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE); +# endif + if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + return VERR_SEM_DESTROYED; + } + if (RT_FAILURE(rc)) + { + /* Decrement the counts and return the error. */ + for (;;) + { + u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0); + c--; + u64State &= ~RTCSRW_CNT_WR_MASK; + u64State |= c << RTCSRW_CNT_WR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + return rc; + } + + u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)) + { + ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone); + if (fDone) + break; + } + AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */ + } + + } + else +#endif /* IN_RING3 || IN_RING0 */ + { +#ifdef IN_RING3 + /* TryEnter call - decrement the number of (waiting) writers. */ +#else + /* We cannot call SUPSemEventWaitNoResume in this context. Go back to + ring-3 and do it there or return rcBusy. */ +#endif + + for (;;) + { + u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0); + c--; + u64State &= ~RTCSRW_CNT_WR_MASK; + u64State |= c << RTCSRW_CNT_WR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + break; + } + +#ifdef IN_RING3 + return VERR_SEM_BUSY; +#else + if (rcBusy == VINF_SUCCESS) + { + Assert(!fTryOnly); + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + /** @todo Should actually do this in via VMMR0.cpp instead of going all the way + * back to ring-3. Goes for both kind of crit sects. */ + return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis)); + } + return rcBusy; +#endif + } + } + + /* + * Got it! + */ + Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)); + ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1); + Assert(pThis->s.Core.cWriterReads == 0); +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (!fNoVal) + RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true); +#endif + STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl)); + STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl); + + return VINF_SUCCESS; +} + + +/** + * Try enter a critical section with exclusive (write) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. 
+ * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The status code to return when we're in RC or R0 and the + * section is busy. Pass VINF_SUCCESS to acquired the + * critical section thru a ring-3 call if necessary. + * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl, + * PDMCritSectRwTryEnterExclDebug, + * PDMCritSectEnterDebug, PDMCritSectEnter, + * RTCritSectRwEnterExcl. + */ +VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); + return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Try enter a critical section with exclusive (write) access. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param rcBusy The status code to return when we're in RC or R0 and the + * section is busy. Pass VINF_SUCCESS to acquired the + * critical section thru a ring-3 call if necessary. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl, + * PDMCritSectRwTryEnterExclDebug, + * PDMCritSectEnterDebug, PDMCritSectEnter, + * RTCritSectRwEnterExclDebug. + */ +VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); + return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Try enter a critical section with exclusive (write) access. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_SEM_BUSY if the critsect was owned. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug, + * PDMCritSectRwEnterExclDebug, + * PDMCritSectTryEnter, PDMCritSectTryEnterDebug, + * RTCritSectRwTryEnterExcl. 
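+ *
+ * A minimal usage sketch (illustrative only; pCritSectRw is any initialised
+ * read/write section):
+ * @code
+ *     if (RT_SUCCESS(PDMCritSectRwTryEnterExcl(pCritSectRw)))
+ *     {
+ *         // ... update the state protected by the section ...
+ *         PDMCritSectRwLeaveExcl(pCritSectRw);
+ *     }
+ *     else
+ *     {
+ *         // VERR_SEM_BUSY - someone else owns it, back off and retry later.
+ *     }
+ * @endcode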
+ */ +VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); + return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +/** + * Try enter a critical section with exclusive (write) access. + * + * @retval VINF_SUCCESS on success. + * @retval VERR_SEM_BUSY if the critsect was owned. + * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param uId Where we're entering the section. + * @param pszFile The source position - file. + * @param iLine The source position - line. + * @param pszFunction The source position - function. + * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl, + * PDMCritSectRwEnterExclDebug, + * PDMCritSectTryEnterDebug, PDMCritSectTryEnter, + * RTCritSectRwTryEnterExclDebug. + */ +VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL) +{ +#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3) + return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/); +#else + RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); + return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/); +#endif +} + + +#ifdef IN_RING3 +/** + * Enters a PDM read/write critical section with exclusive (write) access. + * + * @returns VINF_SUCCESS if entered successfully. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * + * @param pThis Pointer to the read/write critical section. + * @param fCallRing3 Whether this is a VMMRZCallRing3()request. + */ +VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3) +{ + return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/); +} +#endif /* IN_RING3 */ + + +/** + * Leave a critical section held exclusively. + * + * @returns VBox status code. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * @param pThis Pointer to the read/write critical section. + * @param fNoVal No validation records (i.e. queued release). + * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl. + */ +static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED); + + RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis); + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER); + + /* + * Unwind one recursion. Is it the final one? + */ + if (pThis->s.Core.cWriteRecursions == 1) + { + AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) 
*/ +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (fNoVal) + Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD); + else + { + int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + /* + * Update the state. + */ +#if defined(IN_RING3) || defined(IN_RING0) +# ifdef IN_RING0 + if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD) + && ASMIntAreEnabled()) +# endif + { + ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0); + STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl); + ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD); + + for (;;) + { + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + uint64_t u64OldState = u64State; + + uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; + Assert(c > 0); + c--; + + if ( c > 0 + || (u64State & RTCSRW_CNT_RD_MASK) == 0) + { + /* Don't change the direction, wake up the next writer if any. */ + u64State &= ~RTCSRW_CNT_WR_MASK; + u64State |= c << RTCSRW_CNT_WR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + if (c > 0) + { + int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite); + AssertRC(rc); + } + break; + } + } + else + { + /* Reverse the direction and signal the reader threads. */ + u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK); + u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT; + if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState)) + { + Assert(!pThis->s.Core.fNeedReset); + ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true); + int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead); + AssertRC(rc); + break; + } + } + + ASMNopPause(); + if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC) + return VERR_SEM_DESTROYED; + } + } +#endif /* IN_RING3 || IN_RING0 */ +#ifndef IN_RING3 +# ifdef IN_RING0 + else +# endif + { + /* + * We cannot call neither SUPSemEventSignal nor SUPSemEventMultiSignal, + * so queue the exit request (ring-3). + */ + PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM); + PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu); + uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++; + LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis)); + AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves)); + pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis); + VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); + VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); + STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves); + STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl); + } +#endif + } + else + { + /* + * Not the final recursion. + */ + Assert(pThis->s.Core.cWriteRecursions != 0); +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + if (fNoVal) + Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD); + else + { + int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite); + if (RT_FAILURE(rc9)) + return rc9; + } +#endif + ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions); + } + + return VINF_SUCCESS; +} + + +/** + * Leave a critical section held exclusively. + * + * @returns VBox status code. + * @retval VERR_SEM_DESTROYED if the critical section is delete before or + * during the operation. + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl. 
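+ *
+ * The exclusive owner may recurse, both exclusively and in shared mode, and
+ * must unwind in reverse order before the final leave releases the section.
+ * A sketch of the intended nesting (illustrative only):
+ * @code
+ *     PDMCritSectRwEnterExcl(pCritSectRw, VERR_SEM_BUSY);
+ *     PDMCritSectRwEnterShared(pCritSectRw, VERR_SEM_BUSY); // counted as a writer read
+ *     // ...
+ *     PDMCritSectRwLeaveShared(pCritSectRw);                // cWriterReads back to zero
+ *     PDMCritSectRwLeaveExcl(pCritSectRw);                  // final release
+ * @endcode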
+ */ +VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis) +{ + return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/); +} + + +#if defined(IN_RING3) || defined(IN_RING0) +/** + * PDMCritSectBothFF interface. + * + * @param pThis Pointer to the read/write critical section. + */ +void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis) +{ + pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/); +} +#endif + + +/** + * Checks the caller is the exclusive (write) owner of the critical section. + * + * @retval @c true if owner. + * @retval @c false if not owner. + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner, + * RTCritSectRwIsWriteOwner. + */ +VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false); + + /* + * Check ownership. + */ + RTNATIVETHREAD hNativeWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter); + if (hNativeWriter == NIL_RTNATIVETHREAD) + return false; + return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis); +} + + +/** + * Checks if the caller is one of the read owners of the critical section. + * + * @note !CAUTION! This API doesn't work reliably if lock validation isn't + * enabled. Meaning, the answer is not trustworhty unless + * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time. + * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when + * creating the semaphore. And finally, if you used a locking class, + * don't disable deadlock detection by setting cMsMinDeadlock to + * RT_INDEFINITE_WAIT. + * + * In short, only use this for assertions. + * + * @returns @c true if reader, @c false if not. + * @param pThis Pointer to the read/write critical section. + * @param fWannaHear What you'd like to hear when lock validation is not + * available. (For avoiding asserting all over the place.) + * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner. + */ +VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false); + + /* + * Inspect the state. + */ + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)) + { + /* + * It's in write mode, so we can only be a reader if we're also the + * current writer. + */ + RTNATIVETHREAD hWriter; + ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter); + if (hWriter == NIL_RTNATIVETHREAD) + return false; + return hWriter == pdmCritSectRwGetNativeSelf(pThis); + } + + /* + * Read mode. If there are no current readers, then we cannot be a reader. + */ + if (!(u64State & RTCSRW_CNT_RD_MASK)) + return false; + +#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3) + /* + * Ask the lock validator. + * Note! It doesn't know everything, let's deal with that if it becomes an issue... + */ + return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD); +#else + /* + * Ok, we don't know, just tell the caller what he want to hear. + */ + return fWannaHear; +#endif +} + + +/** + * Gets the write recursion count. + * + * @returns The write recursion count (0 if bad critsect). + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount, + * RTCritSectRwGetWriteRecursion. 
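+ *
+ * Typically used in assertions, e.g. (illustrative):
+ * @code
+ *     Assert(PDMCritSectRwGetWriteRecursion(pCritSectRw) == 1);
+ * @endcode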
+ */ +VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0); + + /* + * Return the requested data. + */ + return pThis->s.Core.cWriteRecursions; +} + + +/** + * Gets the read recursion count of the current writer. + * + * @returns The read recursion count (0 if bad critsect). + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount, + * RTCritSectRwGetWriterReadRecursion. + */ +VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis) +{ + /* + * Validate handle. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0); + + /* + * Return the requested data. + */ + return pThis->s.Core.cWriterReads; +} + + +/** + * Gets the current number of reads. + * + * This includes all read recursions, so it might be higher than the number of + * read owners. It does not include reads done by the current writer. + * + * @returns The read count (0 if bad critsect). + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion, + * RTCritSectRwGetReadCount. + */ +VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis) +{ + /* + * Validate input. + */ + AssertPtr(pThis); + AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0); + + /* + * Return the requested data. + */ + uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State); + if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT)) + return 0; + return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; +} + + +/** + * Checks if the read/write critical section is initialized or not. + * + * @retval @c true if initialized. + * @retval @c false if not initialized. + * @param pThis Pointer to the read/write critical section. + * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized. + */ +VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis) +{ + AssertPtr(pThis); + return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC; +} + diff --git a/src/VBox/VMM/VMMAll/PDMAllNetShaper.cpp b/src/VBox/VMM/VMMAll/PDMAllNetShaper.cpp new file mode 100644 index 00000000..21e8047d --- /dev/null +++ b/src/VBox/VMM/VMMAll/PDMAllNetShaper.cpp @@ -0,0 +1,77 @@ +/* $Id: PDMAllNetShaper.cpp $ */ +/** @file + * PDM Network Shaper - Limit network traffic according to bandwidth group settings. + */ + +/* + * Copyright (C) 2011-2013 Oracle Corporation + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License (GPL) as published by the Free Software + * Foundation, in version 2 as it comes in the "COPYING" file of the + * VirtualBox OSE distribution. VirtualBox OSE is distributed in the + * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. + */ + + +/******************************************************************************* +* Header Files * +*******************************************************************************/ +#define LOG_GROUP LOG_GROUP_NET_SHAPER +#include <VBox/vmm/pdm.h> +#include <VBox/log.h> +#include <iprt/time.h> + +#include <VBox/vmm/pdmnetshaper.h> +#include "PDMNetShaperInternal.h" + + +/** + * Obtain bandwidth in a bandwidth group. 
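+ *
+ * Each bandwidth group implements a token bucket: tokens (bytes) are refilled
+ * at cbPerSecMax bytes per second, capped at cbBucket, and a transfer is only
+ * allowed when enough tokens are available. A sketch of the same arithmetic
+ * (illustrative only; the local names stand in for the pBwGroup fields):
+ * @code
+ *     uint64_t const cNsElapsed  = tsNow - tsUpdatedLast;
+ *     uint32_t const cbRefilled  = (uint32_t)(cNsElapsed * cbPerSecMax / (1000 * 1000 * 1000));
+ *     uint32_t const cbAvailable = RT_MIN(cbBucket, cbTokensLast + cbRefilled);
+ *     bool     const fAllowed    = cbTransfer <= cbAvailable;
+ *     if (fAllowed)
+ *     {
+ *         tsUpdatedLast = tsNow;                              // re-timestamp the bucket
+ *         cbTokensLast  = cbAvailable - (uint32_t)cbTransfer; // consume the tokens
+ *     }
+ * @endcode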
+ * + * @returns True if bandwidth was allocated, false if not. + * @param pFilter Pointer to the filter that allocates bandwidth. + * @param cbTransfer Number of bytes to allocate. + */ +VMMDECL(bool) PDMNsAllocateBandwidth(PPDMNSFILTER pFilter, size_t cbTransfer) +{ + AssertPtrReturn(pFilter, true); + if (!VALID_PTR(pFilter->CTX_SUFF(pBwGroup))) + return true; + + PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->CTX_SUFF(pBwGroup), PPDMNSBWGROUP); + int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc); + if (RT_UNLIKELY(rc == VERR_SEM_BUSY)) + return true; + + bool fAllowed = true; + if (pBwGroup->cbPerSecMax) + { + /* Re-fill the bucket first */ + uint64_t tsNow = RTTimeSystemNanoTS(); + uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / (1000 * 1000 * 1000); + uint32_t uTokens = RT_MIN(pBwGroup->cbBucket, uTokensAdded + pBwGroup->cbTokensLast); + + if (cbTransfer > uTokens) + { + fAllowed = false; + ASMAtomicWriteBool(&pFilter->fChoked, true); + } + else + { + pBwGroup->tsUpdatedLast = tsNow; + pBwGroup->cbTokensLast = uTokens - (uint32_t)cbTransfer; + } + Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n", + pBwGroup, R3STRING(pBwGroup->pszNameR3), cbTransfer, uTokens, uTokensAdded, fAllowed)); + } + else + Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} disabled fAllowed=%RTbool\n", + pBwGroup, R3STRING(pBwGroup->pszNameR3), fAllowed)); + + rc = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc); + return fAllowed; +} + diff --git a/src/VBox/VMM/VMMAll/PDMAllQueue.cpp b/src/VBox/VMM/VMMAll/PDMAllQueue.cpp index ddc5ff42..6906d840 100644 --- a/src/VBox/VMM/VMMAll/PDMAllQueue.cpp +++ b/src/VBox/VMM/VMMAll/PDMAllQueue.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -99,7 +99,7 @@ VMMDECL(void) PDMQueueInsert(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem) if (!pQueue->pTimer) { PVM pVM = pQueue->CTX_SUFF(pVM); - Log2(("PDMQueueInsert: VM_FF_PDM_QUEUES %d -> 1\n", VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))); + Log2(("PDMQueueInsert: VM_FF_PDM_QUEUES %d -> 1\n", VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))); VM_FF_SET(pVM, VM_FF_PDM_QUEUES); ASMAtomicBitSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT); #ifdef IN_RING3 diff --git a/src/VBox/VMM/VMMAll/PGMAll.cpp b/src/VBox/VMM/VMMAll/PGMAll.cpp index 84f80021..9a046d1d 100644 --- a/src/VBox/VMM/VMMAll/PGMAll.cpp +++ b/src/VBox/VMM/VMMAll/PGMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -33,8 +33,8 @@ # include <VBox/vmm/rem.h> #endif #include <VBox/vmm/em.h> -#include <VBox/vmm/hwaccm.h> -#include <VBox/vmm/hwacc_vmx.h> +#include <VBox/vmm/hm.h> +#include <VBox/vmm/hm_vmx.h> #include "PGMInternal.h" #include <VBox/vmm/vm.h> #include "PGMInline.h" @@ -772,9 +772,10 @@ VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage) && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)) { pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; - Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled); + Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM)); } +# ifdef VBOX_WITH_RAW_MODE /* * Inform CSAM about the flush * @@ -782,6 +783,7 @@ VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage) * callbacks for virtual handlers, this is no longer required. */ CSAMR3FlushPage(pVM, GCPtrPage); +# endif #endif /* IN_RING3 */ /* Ignore all irrelevant error codes. */ @@ -915,6 +917,56 @@ VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpF /** + * Changing the page flags for a single page in the shadow page tables so as to + * make it supervisor and writable. + * + * This if for dealing with CR0.WP=0 and readonly user pages. + * + * @returns VBox status code. + * @param pVCpu Pointer to the VMCPU. + * @param GCPtr Virtual address of the first page in the range. + * @param fBigPage Whether or not this is a big page. If it is, we have to + * change the shadow PDE as well. If it isn't, the caller + * has checked that the shadow PDE doesn't need changing. + * We ASSUME 4KB pages backing the big page here! + * @param fOpFlags A combination of the PGM_MK_PG_XXX flags. + */ +int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags) +{ + int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags); + if (rc == VINF_SUCCESS && fBigPage) + { + /* this is a bit ugly... */ + switch (pVCpu->pgm.s.enmShadowMode) + { + case PGMMODE_32_BIT: + { + PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr); + AssertReturn(pPde, VERR_INTERNAL_ERROR_3); + Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u)); + pPde->n.u1Write = 1; + Log(("-> PDE=%#llx (32)\n", pPde->u)); + break; + } + case PGMMODE_PAE: + case PGMMODE_PAE_NX: + { + PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr); + AssertReturn(pPde, VERR_INTERNAL_ERROR_3); + Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u)); + pPde->n.u1Write = 1; + Log(("-> PDE=%#llx (PAE)\n", pPde->u)); + break; + } + default: + AssertFailedReturn(VERR_INTERNAL_ERROR_4); + } + } + return rc; +} + + +/** * Gets the shadow page directory for the specified address, PAE. * * @returns Pointer to the shadow PD. @@ -957,7 +1009,7 @@ int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86P /* PD not present; guest must reload CR3 to change it. * No need to monitor anything in this case. 
*/ - Assert(!HWACCMIsEnabled(pVM)); + Assert(!HMIsEnabled(pVM)); GCPdPt = uGstPdpe & X86_PDPE_PG_MASK; enmKind = PGMPOOLKIND_PAE_PD_PHYS; @@ -1229,7 +1281,7 @@ static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PE RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT; rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu), - PGMPOOL_IDX_NESTED_ROOT, iPml4, false /*fLockPage*/, + pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/, &pShwPage); AssertRCReturn(rc, rc); } @@ -1362,6 +1414,63 @@ VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGC /** + * Performs a guest page table walk. + * + * The guest should be in paged protect mode or long mode when making a call to + * this function. + * + * @returns VBox status code. + * @retval VINF_SUCCESS on success. + * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details. + * @retval VERR_PGM_NOT_USED_IN_MODE if not paging isn't enabled. @a pWalk is + * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID. + * + * @param pVCpu The current CPU. + * @param GCPtr The guest virtual address to walk by. + * @param pWalk Where to return the walk result. This is valid on some + * error codes as well. + */ +int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk) +{ + VMCPU_ASSERT_EMT(pVCpu); + switch (pVCpu->pgm.s.enmGuestMode) + { + case PGMMODE_32_BIT: + pWalk->enmType = PGMPTWALKGSTTYPE_32BIT; + return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy); + + case PGMMODE_PAE: + case PGMMODE_PAE_NX: + pWalk->enmType = PGMPTWALKGSTTYPE_PAE; + return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae); + +#if !defined(IN_RC) + case PGMMODE_AMD64: + case PGMMODE_AMD64_NX: + pWalk->enmType = PGMPTWALKGSTTYPE_AMD64; + return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64); +#endif + + case PGMMODE_REAL: + case PGMMODE_PROTECTED: + pWalk->enmType = PGMPTWALKGSTTYPE_INVALID; + return VERR_PGM_NOT_USED_IN_MODE; + +#if defined(IN_RC) + case PGMMODE_AMD64: + case PGMMODE_AMD64_NX: +#endif + case PGMMODE_NESTED: + case PGMMODE_EPT: + default: + AssertFailed(); + pWalk->enmType = PGMPTWALKGSTTYPE_INVALID; + return VERR_PGM_NOT_USED_IN_MODE; + } +} + + +/** * Checks if the page is present. * * @returns true if the page is present. @@ -1636,8 +1745,8 @@ int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4) * Gets the PAE PDPEs values cached by the CPU. * * @returns VBox status code. - * @param pVCpu The virtual CPU. - * @param paPdpes Where to return the four PDPEs. The array + * @param pVCpu Pointer to the VMCPU. + * @param paPdpes Where to return the four PDPEs. The array * pointed to must have 4 entries. */ VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes) @@ -1657,12 +1766,13 @@ VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes) * * @remarks This must be called *AFTER* PGMUpdateCR3. * - * @returns VBox status code. - * @param pVCpu The virtual CPU. - * @param paPdpes The four PDPE values. The array pointed to - * must have exactly 4 entries. + * @param pVCpu Pointer to the VMCPU. + * @param paPdpes The four PDPE values. The array pointed to must + * have exactly 4 entries. + * + * @remarks No-long-jump zone!!! 
*/ -VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes) +VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes) { Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT); @@ -1681,7 +1791,8 @@ VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes) pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS; } } - return VINF_SUCCESS; + + VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES); } @@ -1870,7 +1981,7 @@ VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal) else { AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc)); - Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3)); + Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3)); pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3; pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3; if (pgmMapAreMappingsFloating(pVM)) @@ -1899,7 +2010,7 @@ VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal) if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3) { pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; - Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled); + Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM)); } if (fGlobal) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global)); @@ -1923,9 +2034,9 @@ VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal) * * @returns VBox status code. * @retval VINF_SUCCESS. - * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring - * requires a CR3 sync. This can safely be ignored and overridden since - * the FF will be set too then.) + * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested + * paging modes). This can safely be ignored and overridden since the + * FF will be set too then. * @param pVCpu Pointer to the VMCPU. * @param cr3 The new cr3. */ @@ -1936,7 +2047,7 @@ VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3) /* We assume we're only called in nested paging mode. */ Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT); - Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled); + Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM))); Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)); /* @@ -1966,6 +2077,8 @@ VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3) rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3); AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */ } + + VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3); return rc; } @@ -2018,7 +2131,7 @@ VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, if (!(cr4 & X86_CR4_PGE)) fGlobal = true; LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal, - VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))); + VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))); /* * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB). 
@@ -2098,7 +2211,7 @@ VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, { pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed); - Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled); + Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM))); } } @@ -2181,6 +2294,34 @@ VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t ef /** + * Called by CPUM or REM when CR0.WP changes to 1. + * + * @param pVCpu The cross context virtual CPU structure of the caller. + * @thread EMT + */ +VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu) +{ + /* + * Netware WP0+RO+US hack cleanup when WP0 -> WP1. + * + * Use the counter to judge whether there might be pool pages with active + * hacks in them. If there are, we will be running the risk of messing up + * the guest by allowing it to write to read-only pages. Thus, we have to + * clear the page pool ASAP if there is the slightest chance. + */ + if (pVCpu->pgm.s.cNetwareWp0Hacks > 0) + { + Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1); + + Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks)); + pVCpu->pgm.s.cNetwareWp0Hacks = 0; + pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; + VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); + } +} + + +/** * Gets the current guest paging mode. * * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode(). @@ -2371,9 +2512,17 @@ VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages) * @returns VBox status code * @param pVM Pointer to the VM. */ +#if defined(VBOX_STRICT) && defined(IN_RING3) +int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL) +#else int pgmLock(PVM pVM) +#endif { +#if defined(VBOX_STRICT) && defined(IN_RING3) + int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS); +#else int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY); +#endif #if defined(IN_RC) || defined(IN_RING0) if (rc == VERR_SEM_BUSY) rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0); @@ -2507,7 +2656,7 @@ static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, v cch = pfnOutput(pvArgOutput, szTmp, cch); } else - cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1); + cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>")); NOREF(pszType); NOREF(cchWidth); NOREF(pvUser); return cch; } @@ -2529,7 +2678,7 @@ static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutpu cch = pfnOutput(pvArgOutput, szTmp, cch); } else - cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1); + cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>")); NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags); return cch; } diff --git a/src/VBox/VMM/VMMAll/PGMAllBth.h b/src/VBox/VMM/VMMAll/PGMAllBth.h index 62da7508..dfe3e244 100644 --- a/src/VBox/VMM/VMMAll/PGMAllBth.h +++ b/src/VBox/VMM/VMMAll/PGMAllBth.h @@ -15,7 +15,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -103,8 +103,9 @@ PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPU pVCpu, PGSTPTWALK pGs * If the guest happens to access a non-present page, where our hypervisor * is currently mapped, then we'll create a #PF storm in the guest. */ - if ( (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW) - && MMHyperIsInsideArea(pVCpu->CTX_SUFF(pVM), pGstWalk->Core.GCPtr)) + if ( (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW) + && pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)) + && MMHyperIsInsideArea(pVCpu->CTX_SUFF(pVM), pGstWalk->Core.GCPtr)) { /* Force a CR3 sync to check for conflicts and emulate the instruction. */ VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); @@ -351,6 +352,7 @@ static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(PVMCPU pVCpu, RT return rc; } /* Unhandled part of a monitored page */ + Log(("Unhandled part of monitored page %RGv\n", pvFault)); } else { @@ -864,6 +866,7 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) if ( !GstWalk.Core.fEffectiveUS + && CSAMIsEnabled(pVM) && CPUMGetGuestCPL(pVCpu) == 0) { /* Note: Can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU. */ @@ -970,7 +973,7 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc)); return rc; } - if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))) + if (RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))) return VINF_EM_NO_MEMORY; } @@ -980,10 +983,45 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF */ if ( !GstWalk.Core.fEffectiveRW && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG - && CPUMGetGuestCPL(pVCpu) == 0) + && CPUMGetGuestCPL(pVCpu) < 3) { Assert((uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P)); + + /* + * The Netware WP0+RO+US hack. + * + * Netware sometimes(/always?) runs with WP0. It has been observed doing + * excessive write accesses to pages which are mapped with US=1 and RW=0 + * while WP=0. This causes a lot of exits and extremely slow execution. + * To avoid trapping and emulating every write here, we change the shadow + * page table entry to map it as US=0 and RW=1 until user mode tries to + * access it again (see further below). We count these shadow page table + * changes so we can avoid having to clear the page pool every time the WP + * bit changes to 1 (see PGMCr0WpEnabled()). + */ +# if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && 1 + if ( GstWalk.Core.fEffectiveUS + && !GstWalk.Core.fEffectiveRW + && (GstWalk.Core.fBigPage || GstWalk.Pde.n.u1Write) + && pVM->cCpus == 1 /* Sorry, no go on SMP. Add CFGM option? 
*/) + { + Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, GstWalk.Core.fBigPage)); + rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, GstWalk.Core.fBigPage, PGM_MK_PG_IS_WRITE_FAULT); + if (rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3) + { + PGM_INVL_PG(pVCpu, pvFault); + pVCpu->pgm.s.cNetwareWp0Hacks++; + STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Wp0RoUsHack; }); + return rc; + } + AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc)); + Log(("pgmShwMakePageSupervisorAndWritable(%RGv) failed with rc=%Rrc - ignored\n", pvFault, rc)); + } +# endif + + /* Interpret the access. */ rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault)); + Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), GstWalk.Core.fBigPage, GstWalk.Core.fEffectiveUS)); if (RT_SUCCESS(rc)) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eWPEmulInRZ); else @@ -1016,7 +1054,7 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF * the TLB first as the page is very likely to be in it. */ # if PGM_SHW_TYPE == PGM_TYPE_EPT - HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault); + HMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault); # else PGM_INVL_PG(pVCpu, pvFault); # endif @@ -1026,7 +1064,7 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF if (!pVM->pgm.s.fNestedPaging) { rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2); - AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst)); + AssertMsg(RT_SUCCESS(rc) && ((fPageGst & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst)); LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst)); } uint64_t fPageShw; @@ -1038,6 +1076,32 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF return VINF_SUCCESS; } } +# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) + /* + * Check for Netware WP0+RO+US hack from above and undo it when user + * mode accesses the page again. + */ + else if ( GstWalk.Core.fEffectiveUS + && !GstWalk.Core.fEffectiveRW + && (GstWalk.Core.fBigPage || GstWalk.Pde.n.u1Write) + && pVCpu->pgm.s.cNetwareWp0Hacks > 0 + && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG + && CPUMGetGuestCPL(pVCpu) == 3 + && pVM->cCpus == 1 + ) + { + Log(("PGM #PF: Undo netware WP0+RO+US hack: pvFault=%RGp uErr=%#x\n", pvFault, uErr)); + rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr); + if (RT_SUCCESS(rc)) + { + PGM_INVL_PG(pVCpu, pvFault); + pVCpu->pgm.s.cNetwareWp0Hacks--; + STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Wp0RoUsUnhack; }); + return VINF_SUCCESS; + } + } +# endif /* PGM_WITH_PAGING */ + /** @todo else: why are we here? */ # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && defined(VBOX_STRICT) @@ -1059,8 +1123,13 @@ PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegF * Compare page flags. * Note: we have AVL, A, D bits desynced. 
*/ - AssertMsg( (fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) - == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)), + AssertMsg( (fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) + == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) + || ( pVCpu->pgm.s.cNetwareWp0Hacks > 0 + && (fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK | X86_PTE_RW | X86_PTE_US)) + == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK | X86_PTE_RW | X86_PTE_US)) + && (fPageShw & (X86_PTE_RW | X86_PTE_US)) == X86_PTE_RW + && (fPageGst & (X86_PTE_RW | X86_PTE_US)) == X86_PTE_US), ("Page flags mismatch! pvFault=%RGv uErr=%x GCPhys=%RGp fPageShw=%RX64 fPageGst=%RX64\n", pvFault, (uint32_t)uErr, GCPhys, fPageShw, fPageGst)); } @@ -1144,6 +1213,7 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) { Assert(!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)); STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped)); + PGM_INVL_PG(pVCpu, GCPtrPage); return VINF_SUCCESS; } @@ -1172,6 +1242,7 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) { AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc)); STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped)); + PGM_INVL_PG(pVCpu, GCPtrPage); return VINF_SUCCESS; } Assert(pPDDst); @@ -1182,6 +1253,7 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) if (!pPdpeDst->n.u1Present) { STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped)); + PGM_INVL_PG(pVCpu, GCPtrPage); return VINF_SUCCESS; } @@ -1195,6 +1267,7 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) if (!PdeDst.n.u1Present) { STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped)); + PGM_INVL_PG(pVCpu, GCPtrPage); return VINF_SUCCESS; } @@ -1231,14 +1304,14 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) * This doesn't make sense in GC/R0 so we'll skip it entirely there. 
*/ # ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH - if ( VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) - || ( VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) + if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) + || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) && fIsBigPage && PdeSrc.b.u1Global ) ) # else - if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) ) + if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL) ) # endif { STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePageSkipped)); @@ -1253,7 +1326,7 @@ PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) if (PdeSrc.n.u1Present) { Assert( PdeSrc.n.u1User == PdeDst.n.u1User - && (PdeSrc.n.u1Write || !PdeDst.n.u1Write)); + && (PdeSrc.n.u1Write || !PdeDst.n.u1Write || pVCpu->pgm.s.cNetwareWp0Hacks > 0)); # ifndef PGM_WITHOUT_MAPPING if (PdeDst.u & PGM_PDFLAGS_MAPPING) { @@ -1947,7 +2020,7 @@ static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P)); if ( cPages > 1 && !(uErr & X86_TRAP_PF_P) - && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) + && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) { /* * This code path is currently only taken when the caller is PGMTrap0eHandler @@ -2219,7 +2292,7 @@ static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P)); if ( cPages > 1 && !(uErr & X86_TRAP_PF_P) - && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) + && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) { /* * This code path is currently only taken when the caller is PGMTrap0eHandler @@ -2247,7 +2320,7 @@ static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage SHW_PTE_LOG64(pPTDst->a[iPTDst]), SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : "")); - if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))) + if (RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))) break; } else @@ -2435,7 +2508,8 @@ static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPD /* Bail out here as pgmPoolGetPage will return NULL and we'll crash below. * Our individual shadow handlers will provide more information and force a fatal exit. */ - if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage)) + if ( !HMIsEnabled(pVM) + && MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage)) { LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage)); return VINF_PGM_NO_DIRTY_BIT_TRACKING; @@ -2905,7 +2979,7 @@ static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RT PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); unsigned iPTDst = 0; while ( iPTDst < RT_ELEMENTS(pPTDst->a) - && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) + && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) { if (pRam && GCPhys >= pRam->GCPhys) { @@ -2940,7 +3014,7 @@ static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RT { rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys); AssertRCReturn(rc, rc); - if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) + if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) break; } # endif @@ -3213,7 +3287,7 @@ static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RT SHW_PTE_LOG64(pPTDst->a[iPTDst]), SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? 
" Track-Dirty" : "")); - if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))) + if (RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))) break; } } @@ -3596,7 +3670,7 @@ PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM); NOREF(cr0); NOREF(cr3); NOREF(cr4); NOREF(fGlobal); - LogFlow(("SyncCR3 FF=%d fGlobal=%d\n", !!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), fGlobal)); + LogFlow(("SyncCR3 FF=%d fGlobal=%d\n", !!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), fGlobal)); #if PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT @@ -3729,7 +3803,7 @@ PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGC # if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3) pgmGstGet32bitPDPtr(pVCpu); RTGCPHYS GCPhys; - rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys); + rc = PGMR3DbgR3Ptr2GCPhys(pVM->pUVM, pPGM->pGst32BitPdR3, &GCPhys); AssertRCReturn(rc, 1); AssertMsgReturn(PGM_A20_APPLY(pVCpu, cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false); # endif @@ -4065,7 +4139,7 @@ PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGC { # ifdef IN_RING3 PGMAssertHandlerAndFlagsInSync(pVM); - DBGFR3PagingDumpEx(pVM, pVCpu->idCpu, DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE + DBGFR3PagingDumpEx(pVM->pUVM, pVCpu->idCpu, DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3, 0, 0, UINT64_MAX, 99, NULL); # endif @@ -4581,8 +4655,6 @@ PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3) */ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); PPGMPOOLPAGE pOldShwPageCR3 = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3); - uint32_t iOldShwUserTable = pVCpu->pgm.s.iShwUserTable; - uint32_t iOldShwUser = pVCpu->pgm.s.iShwUser; PPGMPOOLPAGE pNewShwPageCR3; pgmLock(pVM); @@ -4594,7 +4666,7 @@ PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3) Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32))); rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu), - SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, true /*fLockPage*/, + NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/, &pNewShwPageCR3); AssertFatalRC(rc); rc = VINF_SUCCESS; @@ -4611,8 +4683,6 @@ PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3) VMMRZCallRing3Disable(pVCpu); # endif - pVCpu->pgm.s.iShwUser = SHW_POOL_ROOT_IDX; - pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT; pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3; # ifdef IN_RING0 pVCpu->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); @@ -4632,7 +4702,7 @@ PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3) * make sure we check for conflicts in the new CR3 root. */ # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) - Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); + Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); # endif rc = pgmMapActivateCR3(pVM, pNewShwPageCR3); AssertRCReturn(rc, rc); @@ -4659,7 +4729,7 @@ PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3) /* Mark the page as unlocked; allow flushing again. 
*/ pgmPoolUnlockPage(pPool, pOldShwPageCR3); - pgmPoolFreeByPage(pPool, pOldShwPageCR3, iOldShwUser, iOldShwUserTable); + pgmPoolFreeByPage(pPool, pOldShwPageCR3, NIL_PGMPOOL_IDX, UINT32_MAX); } pgmUnlock(pVM); # else @@ -4742,8 +4812,6 @@ PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu) { PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); - Assert(pVCpu->pgm.s.iShwUser != PGMPOOL_IDX_NESTED_ROOT); - # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT if (pPool->cDirtyPages) pgmPoolResetDirtyPages(pVM); @@ -4752,12 +4820,10 @@ PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu) /* Mark the page as unlocked; allow flushing again. */ pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); - pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable); + pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX); pVCpu->pgm.s.pShwPageCR3R3 = 0; pVCpu->pgm.s.pShwPageCR3R0 = 0; pVCpu->pgm.s.pShwPageCR3RC = 0; - pVCpu->pgm.s.iShwUser = 0; - pVCpu->pgm.s.iShwUserTable = 0; } pgmUnlock(pVM); # endif diff --git a/src/VBox/VMM/VMMAll/PGMAllGst.h b/src/VBox/VMM/VMMAll/PGMAllGst.h index 9d8d6317..0b94e510 100644 --- a/src/VBox/VMM/VMMAll/PGMAllGst.h +++ b/src/VBox/VMM/VMMAll/PGMAllGst.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/PGMAllHandler.cpp b/src/VBox/VMM/VMMAll/PGMAllHandler.cpp index faa827f8..688a594d 100644 --- a/src/VBox/VMM/VMMAll/PGMAllHandler.cpp +++ b/src/VBox/VMM/VMMAll/PGMAllHandler.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -117,7 +117,7 @@ VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, R VERR_INVALID_PARAMETER); AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER); AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER); - AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER); + AssertReturn(pfnHandlerRC || HMIsEnabled(pVM), VERR_INVALID_PARAMETER); /* * We require the range to be within registered ram. @@ -129,7 +129,7 @@ VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, R || GCPhys > pRam->GCPhysLast) { #ifdef IN_RING3 - DBGFR3Info(pVM, "phys", NULL, NULL); + DBGFR3Info(pVM->pUVM, "phys", NULL, NULL); #endif AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast)); return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE; @@ -183,7 +183,7 @@ VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, R pgmUnlock(pVM); #if defined(IN_RING3) && defined(VBOX_STRICT) - DBGFR3Info(pVM, "handlers", "phys nostats", NULL); + DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL); #endif AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc)); MMHyperFree(pVM, pNew); @@ -242,7 +242,7 @@ static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDL Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc)); } else - Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. 
rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3))); + Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3))); return rc; } @@ -423,7 +423,8 @@ DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVM pVM, RTGCPHYS GCPhys, boo */ void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, bool fDoAccounting) { - Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO); + Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO + || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO); Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED); /* @@ -436,7 +437,7 @@ void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3) PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM)); # else - HWACCMFlushTLBOnAllVCpus(pVM); + HMFlushTLBOnAllVCpus(pVM); # endif /* @@ -494,9 +495,10 @@ static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur) int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); if (RT_SUCCESS(rc)) { - /* Reset MMIO2 for MMIO pages to MMIO, since this aliasing is our business. + /* Reset aliased MMIO pages to MMIO, since this aliasing is our business. (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */ - if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO) + if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO + || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO) { Assert(pCur->cAliasedPages > 0); pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, false /*fDoAccounting*/); @@ -887,7 +889,8 @@ VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys) uint32_t cLeft = pCur->cPages; while (cLeft-- > 0) { - if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO) + if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO + || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO) { Assert(pCur->cAliasedPages > 0); pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)cLeft << PAGE_SHIFT), @@ -1003,6 +1006,7 @@ VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS G return VERR_PGM_HANDLER_NOT_FOUND; } +#ifndef IEM_VERIFICATION_MODE_FULL /** * Replaces an MMIO page with an MMIO2 page. @@ -1130,13 +1134,16 @@ VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCP return VERR_PGM_HANDLER_NOT_FOUND; } + /** - * Replaces an MMIO page with an arbitrary HC page. + * Replaces an MMIO page with an arbitrary HC page in the shadow page tables. * - * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to - * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no - * backing, the caller must provide a replacement page. For various reasons the - * replacement page must be an MMIO2 page. + * This differs from PGMHandlerPhysicalPageAlias in that the page doesn't need + * to be a known MMIO2 page and that only shadow paging may access the page. + * The latter distinction is important because the only use for this feature is + * for mapping the special APIC access page that VT-x uses to detect APIC MMIO + * operations, the page is shared between all guest CPUs and actually not + * written to. At least at the moment. 
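Illustrative sketch (not part of the patch, and not VirtualBox's own code): a minimal model of the page-state transition the comment above describes. All names and fields below are invented for the example; only the behaviour mirrors the diff — an MMIO page is re-pointed at a host page and marked as a special alias, and a later reset restores MMIO semantics so the access handler fires again.

#include <assert.h>
#include <stdint.h>

typedef enum { PAGE_MMIO, PAGE_MMIO2_ALIAS_MMIO, PAGE_SPECIAL_ALIAS_MMIO } pagetype_t;

typedef struct {
    pagetype_t enmType;     /* what the rest of the memory manager sees      */
    uint64_t   HCPhys;      /* host-physical page the shadow tables point at */
    int        fHandlerOn;  /* access handler active? (off while aliased)    */
} pagemodel_t;

/* Alias: let shadow paging map an arbitrary host page, mark the page special. */
static void aliasSpecialPage(pagemodel_t *pPage, uint64_t HCPhysRemap)
{
    assert(pPage->enmType == PAGE_MMIO);
    pPage->HCPhys     = HCPhysRemap;
    pPage->enmType    = PAGE_SPECIAL_ALIAS_MMIO;
    pPage->fHandlerOn = 0;
}

/* Reset: restore MMIO semantics so accesses are caught by the handler again. */
static void resetAliasedPage(pagemodel_t *pPage)
{
    assert(   pPage->enmType == PAGE_SPECIAL_ALIAS_MMIO
           || pPage->enmType == PAGE_MMIO2_ALIAS_MMIO);
    pPage->HCPhys     = 0;
    pPage->enmType    = PAGE_MMIO;
    pPage->fHandlerOn = 1;
}

int main(void)
{
    pagemodel_t Page = { PAGE_MMIO, 0, 1 };
    aliasSpecialPage(&Page, UINT64_C(0xfee00000));
    resetAliasedPage(&Page);
    assert(Page.enmType == PAGE_MMIO && Page.fHandlerOn);
    return 0;
}
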
* * The caller must do required page table modifications. You can get away * without making any modifications since it's an MMIO page, the cost is an extra @@ -1144,11 +1151,6 @@ VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCP * * Call PGMHandlerPhysicalReset() to restore the MMIO page. * - * The caller may still get handler callback even after this call and must be - * able to deal correctly with such calls. The reason for these callbacks are - * either that we're executing in the recompiler (which doesn't know about this - * arrangement) or that we've been restored from saved state (where we won't - * save the change). * * @returns VBox status code. * @param pVM Pointer to the VM. @@ -1191,7 +1193,7 @@ VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS G if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO) { pgmUnlock(pVM); - AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO, + AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO, ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage), VERR_PGM_PHYS_NOT_MMIO2); return VINF_PGM_HANDLER_ALREADY_ALIASED; @@ -1200,17 +1202,14 @@ VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS G /* * Do the actual remapping here. - * This page now serves as an alias for the backing memory specified. + * This page now serves as an alias for the backing memory + * specified as far as shadow paging is concerned. */ LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n", GCPhysPage, pPage, HCPhysPageRemap)); PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap); - PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO); + PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO); PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED); - /** @todo hack alert - * This needs to be done properly. Currently we get away with it as the recompiler directly calls - * IOM read and write functions. Access through PGMPhysRead/Write will crash the process. - */ PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID); PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED); pCur->cAliasedPages++; @@ -1234,6 +1233,7 @@ VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS G return VERR_PGM_HANDLER_NOT_FOUND; } +#endif /* !IEM_VERIFICATION_MODE_FULL */ /** * Checks if a physical range is handled diff --git a/src/VBox/VMM/VMMAll/PGMAllMap.cpp b/src/VBox/VMM/VMMAll/PGMAllMap.cpp index 188e7ac3..8edb607c 100644 --- a/src/VBox/VMM/VMMAll/PGMAllMap.cpp +++ b/src/VBox/VMM/VMMAll/PGMAllMap.cpp @@ -258,11 +258,13 @@ VMMDECL(int) PGMMapGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS return VERR_NOT_FOUND; } +#ifndef PGM_WITHOUT_MAPPINGS -#ifdef VBOX_WITH_RAW_MODE_NOT_R0 /** * Sets all PDEs involved with the mapping in the shadow page table. * + * Ignored if mappings are disabled (i.e. if HM is enabled). + * * @param pVM Pointer to the VM. * @param pMap Pointer to the mapping in question. * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping. @@ -271,8 +273,7 @@ void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE) { Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(pVM))); - if ( !pgmMapAreMappingsEnabled(pVM) - || pVM->cCpus > 1) + if (!pgmMapAreMappingsEnabled(pVM)) return; /* This only applies to raw mode where we only support 1 VCPU. 
*/ @@ -355,7 +356,7 @@ void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE) AssertFatal(pPoolPagePd); if (!pgmPoolIsPageLocked(pPoolPagePd)) pgmPoolLockPage(pPool, pPoolPagePd); -#ifdef VBOX_STRICT +# ifdef VBOX_STRICT else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING) { Assert(PGMGetGuestMode(pVCpu) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */ @@ -367,7 +368,7 @@ void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE) || !PGMMODE_WITH_PAGING(PGMGetGuestMode(pVCpu)), ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1)); } -#endif +# endif /* * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us). @@ -415,6 +416,8 @@ void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE) /** * Clears all PDEs involved with the mapping in the shadow page table. * + * Ignored if mappings are disabled (i.e. if HM is enabled). + * * @param pVM Pointer to the VM. * @param pShwPageCR3 CR3 root page * @param pMap Pointer to the mapping in question. @@ -426,10 +429,9 @@ void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(pVM), fDeactivateCR3)); /* - * Skip this if disabled or if it doesn't apply. + * Skip this if it doesn't apply. */ - if ( !pgmMapAreMappingsEnabled(pVM) - || pVM->cCpus > 1) + if (!pgmMapAreMappingsEnabled(pVM)) return; Assert(pShwPageCR3); @@ -539,9 +541,10 @@ void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt); } -#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ +#endif /* PGM_WITHOUT_MAPPINGS */ #if defined(VBOX_STRICT) && !defined(IN_RING0) + /** * Clears all PDEs involved with the mapping in the shadow page table. * @@ -622,6 +625,8 @@ static void pgmMapCheckShadowPDEs(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPageCR /** * Check the hypervisor mappings in the active CR3. * + * Ignored if mappings are disabled (i.e. if HM is enabled). + * * @param pVM The virtual machine. */ VMMDECL(void) PGMMapCheck(PVM pVM) @@ -648,13 +653,15 @@ VMMDECL(void) PGMMapCheck(PVM pVM) } pgmUnlock(pVM); } -#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */ -#ifdef VBOX_WITH_RAW_MODE_NOT_R0 +#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */ +#ifndef PGM_WITHOUT_MAPPINGS /** * Apply the hypervisor mappings to the active CR3. * + * Ignored if mappings are disabled (i.e. if HM is enabled). + * * @returns VBox status. * @param pVM The virtual machine. * @param pShwPageCR3 CR3 root page @@ -662,10 +669,9 @@ VMMDECL(void) PGMMapCheck(PVM pVM) int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) { /* - * Skip this if disabled or if it doesn't apply. + * Skip this if it doesn't apply. */ - if ( !pgmMapAreMappingsEnabled(pVM) - || pVM->cCpus > 1) + if (!pgmMapAreMappingsEnabled(pVM)) return VINF_SUCCESS; /* Note! This might not be logged successfully in RC because we usually @@ -692,6 +698,8 @@ int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) /** * Remove the hypervisor mappings from the specified CR3 * + * Ignored if mappings are disabled (i.e. if HM is enabled). + * * @returns VBox status. * @param pVM The virtual machine. 
* @param pShwPageCR3 CR3 root page @@ -699,10 +707,9 @@ int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) { /* - * Skip this if disabled or if it doesn't apply. + * Skip this if it doesn't apply. */ - if ( !pgmMapAreMappingsEnabled(pVM) - || pVM->cCpus > 1) + if (!pgmMapAreMappingsEnabled(pVM)) return VINF_SUCCESS; Assert(pShwPageCR3); @@ -734,8 +741,7 @@ VMMDECL(bool) PGMMapHasConflicts(PVM pVM) */ if (!pgmMapAreMappingsFloating(pVM)) return false; - - Assert(pVM->cCpus == 1); + AssertReturn(pgmMapAreMappingsEnabled(pVM), false); /* This only applies to raw mode where we only support 1 VCPU. */ PVMCPU pVCpu = &pVM->aCpus[0]; @@ -764,17 +770,17 @@ VMMDECL(bool) PGMMapHasConflicts(PVM pVM) { STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); -#ifdef IN_RING3 +# ifdef IN_RING3 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n" " iPDE=%#x iPT=%#x PDE=%RGp.\n", (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc, iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); -#else +# else Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n" " iPDE=%#x iPT=%#x PDE=%RGp.\n", (iPT + iPDE) << X86_PD_SHIFT, iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); -#endif +# endif return true; } } @@ -795,15 +801,15 @@ VMMDECL(bool) PGMMapHasConflicts(PVM pVM) && (EMIsRawRing0Enabled(pVM) || Pde.n.u1User)) { STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); -#ifdef IN_RING3 +# ifdef IN_RING3 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n" " PDE=%016RX64.\n", GCPtr, pCur->pszDesc, Pde.u)); -#else +# else Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n" " PDE=%016RX64.\n", GCPtr, Pde.u)); -#endif +# endif return true; } GCPtr += (1 << X86_PD_PAE_SHIFT); @@ -827,7 +833,7 @@ int pgmMapResolveConflicts(PVM pVM) { /* The caller is expected to check these two conditions. */ Assert(!pVM->pgm.s.fMappingsFixed); - Assert(!pVM->pgm.s.fMappingsDisabled); + Assert(pgmMapAreMappingsEnabled(pVM)); /* This only applies to raw mode where we only support 1 VCPU. */ Assert(pVM->cCpus == 1); @@ -859,7 +865,7 @@ int pgmMapResolveConflicts(PVM pVM) { STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); -#ifdef IN_RING3 +# ifdef IN_RING3 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n" " iPDE=%#x iPT=%#x PDE=%RGp.\n", (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc, @@ -867,13 +873,13 @@ int pgmMapResolveConflicts(PVM pVM) int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT); AssertRCReturn(rc, rc); break; -#else +# else Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n" " iPDE=%#x iPT=%#x PDE=%RGp.\n", (iPT + iPDE) << X86_PD_SHIFT, iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); return VINF_PGM_SYNC_CR3; -#endif +# endif } } pCur = pNext; @@ -924,5 +930,5 @@ int pgmMapResolveConflicts(PVM pVM) return VINF_SUCCESS; } -#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ +#endif /* PGM_WITHOUT_MAPPINGS */ diff --git a/src/VBox/VMM/VMMAll/PGMAllPhys.cpp b/src/VBox/VMM/VMMAll/PGMAllPhys.cpp index 0bb156e6..9d6c7f98 100644 --- a/src/VBox/VMM/VMMAll/PGMAllPhys.cpp +++ b/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2011 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -119,8 +119,6 @@ VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE p return VINF_SUCCESS; } } - else if (RT_UNLIKELY(rc == VERR_EM_INTERNAL_DISAS_ERROR)) - return rc; break; } @@ -489,7 +487,7 @@ static int pgmPhysEnsureHandyPage(PVM pVM) #endif { Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n", - pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) )); + pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )); #ifdef IN_RING3 int rc = PGMR3PhysAllocateHandyPages(pVM); #else @@ -505,8 +503,8 @@ static int pgmPhysEnsureHandyPage(PVM pVM) LogRel(("PGM: no more handy pages!\n")); return VERR_EM_NO_MEMORY; } - Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES)); - Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY)); + Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES)); + Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)); #ifdef IN_RING3 # ifdef VBOX_WITH_REM REMR3NotifyFF(pVM); @@ -572,7 +570,7 @@ int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys) */ PGM_LOCK_ASSERT_OWNER(pVM); AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys)); - Assert(!PGM_PAGE_IS_MMIO(pPage)); + Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); # ifdef PGM_WITH_LARGE_PAGES /* @@ -621,7 +619,7 @@ int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys) /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */ PGM_LOCK_ASSERT_OWNER(pVM); AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys)); - Assert(!PGM_PAGE_IS_MMIO(pPage)); + Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); uint32_t iHandyPage = --pVM->pgm.s.cHandyPages; AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage)); @@ -1029,35 +1027,42 @@ static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMP /* - * Special case: ZERO and MMIO2 pages. + * Special cases: MMIO2, ZERO and specially aliased MMIO pages. */ + if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2 + || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO) + { + /* Decode the page id to a page in a MMIO2 ram range. */ + uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage)); + uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage)); + AssertLogRelReturn((uint8_t)(idMmio2 - 1U)< RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), + VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); + PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1]; + AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); + AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); + AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE); + *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT); + *ppMap = NULL; + return VINF_SUCCESS; + } + const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage); if (idChunk == NIL_GMM_CHUNKID) { - AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_1); - if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2) - { - /* Lookup the MMIO2 range and use pvR3 to calc the address. 
*/ - PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys); - AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_2); - *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys)); - } - else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO) + AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), + VERR_PGM_PHYS_PAGE_MAP_IPE_1); + if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)) { - /** @todo deal with aliased MMIO2 pages somehow... - * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for - * them, that would also avoid this mess. It would actually be kind of - * elegant... */ - AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_PGM_MAP_MMIO2_ALIAS_MMIO); + AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), + VERR_PGM_PHYS_PAGE_MAP_IPE_3); + AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage), + VERR_PGM_PHYS_PAGE_MAP_IPE_4); + *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg); } else { - /** @todo handle MMIO2 */ - AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_3); - AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, - ("pPage=%R[pgmpage]\n", pPage), - VERR_PGM_PHYS_PAGE_MAP_IPE_4); - *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg); + static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */ + *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff); } *ppMap = NULL; return VINF_SUCCESS; @@ -1685,7 +1690,7 @@ VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const ** rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); if (RT_SUCCESS(rc)) { - if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage))) + if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))) rc = VERR_PGM_PHYS_PAGE_RESERVED; else { @@ -1717,7 +1722,7 @@ VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const ** { /* MMIO pages doesn't have any readable backing. */ PPGMPAGE pPage = pTlbe->pPage; - if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage))) + if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))) rc = VERR_PGM_PHYS_PAGE_RESERVED; else { @@ -2100,7 +2105,8 @@ static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pv #ifdef IN_RING3 PPGMPHYSHANDLER pPhys = NULL; #endif - if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL) + if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL + || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)) { #ifdef IN_RING3 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys); @@ -2237,7 +2243,8 @@ VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead) /* * Any ALL access handlers? */ - if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))) + if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) + || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))) { int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb); if (RT_FAILURE(rc)) @@ -2336,8 +2343,8 @@ static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void co * write area. This should be a pretty frequent case with MMIO and * the heavy usage of full page handlers in the page pool. 
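Illustrative sketch (not part of the patch): how a packed MMIO2 page id of the kind decoded in the pgmPhysPageMapCommon hunk above can be split into a range id and a page index and turned into a ring-3 address. The 8-bit/24-bit split, the 1-based range id and all names here are assumptions made for the example.

#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SHIFT      12
#define EX_PAGEID_IDX_MASK UINT32_C(0x00ffffff)  /* low 24 bits: page index  */
#define EX_PAGEID_ID_SHIFT 24                    /* high 8 bits: 1-based id  */

typedef struct {
    uint32_t cPages;   /* size of the MMIO2 range in pages  */
    uint8_t *pvR3;     /* ring-3 mapping of the whole range */
} ex_mmio2range_t;

/* Decode idPage and return the ring-3 address of that page, or NULL on a bad id. */
static void *exMmio2PageIdToPtr(ex_mmio2range_t *paRanges, unsigned cRanges, uint32_t idPage)
{
    uint8_t  idMmio2 = (uint8_t)(idPage >> EX_PAGEID_ID_SHIFT);
    uint32_t iPage   = idPage & EX_PAGEID_IDX_MASK;
    if (idMmio2 == 0 || idMmio2 > cRanges)       /* id is 1-based, 0 is invalid */
        return NULL;
    ex_mmio2range_t *pRange = &paRanges[idMmio2 - 1];
    if (iPage >= pRange->cPages)
        return NULL;
    return pRange->pvR3 + ((size_t)iPage << EX_PAGE_SHIFT);
}
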
*/ - if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) - || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */) + if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) + || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */) { PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys); if (pCur) @@ -2357,7 +2364,7 @@ static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void co #else /* IN_RING3 */ Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) )); - if (!PGM_PAGE_IS_MMIO(pPage)) + if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)) rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck); else rc = VINF_SUCCESS; @@ -2405,7 +2412,7 @@ static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void co #endif /* IN_RING3 */ } /* else: the handler is somewhere else in the page, deal with it below. */ - Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */ + Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */ } /* * A virtual handler without any interfering physical handlers. @@ -2772,7 +2779,8 @@ VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cb /* * Any active WRITE or ALL access handlers? */ - if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) + if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) + || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)) { int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb); if (RT_FAILURE(rc)) @@ -4016,22 +4024,25 @@ VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys) * accesses or is odd in any way. * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist. * - * @param pVM Pointer to the VM. - * @param GCPhys The GC physical address to convert. Since this is only - * used for filling the REM TLB, the A20 mask must be - * applied before calling this API. + * @param pVM Pointer to the cross context VM structure. + * @param pVCpu Pointer to the cross context virtual CPU structure of + * the calling EMT. + * @param GCPhys The GC physical address to convert. This API mask the + * A20 line when necessary. * @param fWritable Whether write access is required. * @param ppv Where to store the pointer corresponding to GCPhys on * success. * @param pLock * * @remarks This is more or a less a copy of PGMR3PhysTlbGCPhys2Ptr. + * @thread EMT(pVCpu). 
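Illustrative sketch (names are assumptions, not the macro from the patch): what "applying the A20 mask" to a guest-physical address amounts to. With the A20 gate disabled, bit 20 of the address is forced to zero so accesses wrap at 1 MiB, as on real hardware.

#include <stdint.h>

/* Force address bit 20 to zero when the A20 gate is disabled. */
static inline uint64_t exApplyA20(uint64_t GCPhys, int fA20Enabled)
{
    return fA20Enabled ? GCPhys : (GCPhys & ~(UINT64_C(1) << 20));
}
/* e.g. exApplyA20(0x100000, 0) yields 0x000000; with A20 enabled the address is unchanged. */
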
*/ -VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers, +VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers, void **ppv, PPGMPAGEMAPLOCK pLock) { + PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys); + pgmLock(pVM); - PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys); PPGMRAMRANGE pRam; PPGMPAGE pPage; @@ -4040,6 +4051,8 @@ VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, { if (PGM_PAGE_IS_BALLOONED(pPage)) rc = VERR_PGM_PHYS_TLB_CATCH_WRITE; + else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)) + rc = VERR_PGM_PHYS_TLB_CATCH_ALL; else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage) || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) ) rc = VINF_SUCCESS; @@ -4077,7 +4090,6 @@ VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, } #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) - PVMCPU pVCpu = VMMGetCpu(pVM); void *pv; rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), @@ -4117,3 +4129,62 @@ VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, return rc; } + +/** + * Checks if the give GCPhys page requires special handling for the given access + * because it's MMIO or otherwise monitored. + * + * @returns VBox status code (no informational statuses). + * @retval VINF_SUCCESS on success. + * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write + * access handler of some kind. + * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all + * accesses or is odd in any way. + * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist. + * + * @param pVM Pointer to the VM. + * @param GCPhys The GC physical address to convert. Since this is only + * used for filling the REM TLB, the A20 mask must be + * applied before calling this API. + * @param fWritable Whether write access is required. + * + * @remarks This is a watered down version PGMPhysIemGCPhys2Ptr and really just + * a stop gap thing that should be removed once there is a better TLB + * for virtual address accesses. + */ +VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers) +{ + pgmLock(pVM); + PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys); + + PPGMRAMRANGE pRam; + PPGMPAGE pPage; + int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam); + if (RT_SUCCESS(rc)) + { + if (PGM_PAGE_IS_BALLOONED(pPage)) + rc = VERR_PGM_PHYS_TLB_CATCH_WRITE; + else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)) + rc = VERR_PGM_PHYS_TLB_CATCH_ALL; + else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage) + || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) ) + rc = VINF_SUCCESS; + else + { + if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */ + { + Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage)); + rc = VERR_PGM_PHYS_TLB_CATCH_ALL; + } + else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable) + { + Assert(!fByPassHandlers); + rc = VERR_PGM_PHYS_TLB_CATCH_WRITE; + } + } + } + + pgmUnlock(pVM); + return rc; +} + diff --git a/src/VBox/VMM/VMMAll/PGMAllPool.cpp b/src/VBox/VMM/VMMAll/PGMAllPool.cpp index 952e8b59..30fbd014 100644 --- a/src/VBox/VMM/VMMAll/PGMAllPool.cpp +++ b/src/VBox/VMM/VMMAll/PGMAllPool.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -31,7 +31,7 @@ #include <VBox/vmm/vm.h> #include "PGMInline.h" #include <VBox/disopcode.h> -#include <VBox/vmm/hwacc_vmx.h> +#include <VBox/vmm/hm_vmx.h> #include <VBox/log.h> #include <VBox/err.h> @@ -46,13 +46,14 @@ RT_C_DECLS_BEGIN DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind); DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind); +static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage); static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage); static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable); static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage); #ifndef IN_RING3 DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser); #endif -#ifdef LOG_ENABLED +#if defined(LOG_ENABLED) || defined(VBOX_STRICT) static const char *pgmPoolPoolKindToStr(uint8_t enmKind); #endif #if 0 /*defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)*/ @@ -423,7 +424,7 @@ void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPag } #if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */ if ( uShw.pPD->a[iShw].n.u1Present - && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)) + && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)) { LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u)); # ifdef IN_RC /* TLB load - we're pushing things a bit... */ @@ -741,7 +742,7 @@ DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pReg { #ifndef IN_RC /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */ - if ( HWACCMHasPendingIrq(pVM) + if ( HMHasPendingIrq(pVM) && (pRegFrame->rsp - pvFault) < 32) { /* Fault caused by stack writes while trying to inject an interrupt event. */ @@ -755,7 +756,7 @@ DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pReg LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg)); /* Non-supervisor mode write means it's used for something else. */ - if (CPUMGetGuestCPL(pVCpu) != 0) + if (CPUMGetGuestCPL(pVCpu) == 3) return true; switch (pDis->pCurInstr->uOpcode) @@ -842,7 +843,7 @@ static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGM else if (rc2 == VINF_EM_RESCHEDULE) { if (rc == VINF_SUCCESS) - rc = rc2; + rc = VBOXSTRICTRC_VAL(rc2); # ifndef IN_RING3 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); # endif @@ -977,11 +978,21 @@ DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool * Clear all the pages. ASSUMES that pvFault is readable. 
*/ #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) - uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); - pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->Param1)); + uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); +#endif + + uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1); + if (cbWrite <= 8) + pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, cbWrite); + else + { + Assert(cbWrite <= 16); + pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, 8); + pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + 8, pvFault + 8, cbWrite - 8); + } + +#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); -#else - pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->Param1)); #endif /* @@ -1070,7 +1081,7 @@ DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT if (pPage->fDirty) { - Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)); + Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)); pgmUnlock(pVM); return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */ } @@ -1786,7 +1797,11 @@ void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage) } Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aDirtyPages) || pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx == NIL_PGMPOOL_IDX); - return; + + /* + * Clear all references to this shadow table. See @bugref{7298}. + */ + pgmPoolTrackClearPageUsers(pPool, pPage); } # endif /* !IN_RING3 */ @@ -2013,7 +2028,7 @@ static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser) for (unsigned iLoop = 0; ; iLoop++) { uint16_t iToFree = pPool->iAgeTail; - if (iToFree == iUser) + if (iToFree == iUser && iUser != NIL_PGMPOOL_IDX) iToFree = pPool->aPages[iToFree].iAgePrev; /* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls. if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX) @@ -2172,8 +2187,10 @@ static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2) * @param enmKind The kind of mapping. * @param enmAccess Access type for the mapping (only relevant for big pages) * @param fA20Enabled Whether the CPU has the A20 gate enabled. - * @param iUser The shadow page pool index of the user table. - * @param iUserTable The index into the user table (shadowed). + * @param iUser The shadow page pool index of the user table. This is + * NIL_PGMPOOL_IDX for root pages. + * @param iUserTable The index into the user table (shadowed). Ignored if + * root page * @param ppPage Where to store the pointer to the page. */ static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled, @@ -2201,7 +2218,9 @@ static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKin */ pgmPoolCacheUsed(pPool, pPage); - int rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable); + int rc = VINF_SUCCESS; + if (iUser != NIL_PGMPOOL_IDX) + rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable); if (RT_SUCCESS(rc)) { Assert((PGMPOOLKIND)pPage->enmKind == enmKind); @@ -2490,7 +2509,7 @@ static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage) * the heap size should suffice. 
*/ AssertFatalMsgRC(rc, ("PGMHandlerPhysicalRegisterEx %RGp failed with %Rrc\n", GCPhysPage, rc)); PVMCPU pVCpu = VMMGetCpu(pVM); - AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))); + AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))); } pPage->fMonitored = true; return rc; @@ -2587,7 +2606,7 @@ static int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage) rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK); AssertFatalRC(rc); PVMCPU pVCpu = VMMGetCpu(pVM); - AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), + AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("%#x %#x\n", pVCpu->pgm.s.fSyncFlags, pVM->fGlobalForcedActions)); } pPage->fMonitored = false; @@ -2804,43 +2823,49 @@ DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS LogFlow(("pgmPoolTrackInsert GCPhys=%RGp iUser=%d iUserTable=%x\n", GCPhys, iUser, iUserTable)); -#ifdef VBOX_STRICT - /* - * Check that the entry doesn't already exists. - */ - if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX) + if (iUser != NIL_PGMPOOL_IDX) { - uint16_t i = pPage->iUserHead; - do +#ifdef VBOX_STRICT + /* + * Check that the entry doesn't already exists. + */ + if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX) { - Assert(i < pPool->cMaxUsers); - AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable)); - i = paUsers[i].iNext; - } while (i != NIL_PGMPOOL_USER_INDEX); - } + uint16_t i = pPage->iUserHead; + do + { + Assert(i < pPool->cMaxUsers); + AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable)); + i = paUsers[i].iNext; + } while (i != NIL_PGMPOOL_USER_INDEX); + } #endif - /* - * Find free a user node. - */ - uint16_t i = pPool->iUserFreeHead; - if (i == NIL_PGMPOOL_USER_INDEX) - { - rc = pgmPoolTrackFreeOneUser(pPool, iUser); - if (RT_FAILURE(rc)) - return rc; - i = pPool->iUserFreeHead; + /* + * Find free a user node. + */ + uint16_t i = pPool->iUserFreeHead; + if (i == NIL_PGMPOOL_USER_INDEX) + { + rc = pgmPoolTrackFreeOneUser(pPool, iUser); + if (RT_FAILURE(rc)) + return rc; + i = pPool->iUserFreeHead; + } + + /* + * Unlink the user node from the free list, + * initialize and insert it into the user list. + */ + pPool->iUserFreeHead = paUsers[i].iNext; + paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX; + paUsers[i].iUser = iUser; + paUsers[i].iUserTable = iUserTable; + pPage->iUserHead = i; } + else + pPage->iUserHead = NIL_PGMPOOL_USER_INDEX; - /* - * Unlink the user node from the free list, - * initialize and insert it into the user list. - */ - pPool->iUserFreeHead = paUsers[i].iNext; - paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX; - paUsers[i].iUser = iUser; - paUsers[i].iUserTable = iUserTable; - pPage->iUserHead = i; /* * Insert into cache and enable monitoring of the guest page if enabled. 
@@ -2880,9 +2905,9 @@ DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS */ static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable) { + Log3(("pgmPoolTrackAddUser: GCPhys=%RGp iUser=%%x iUserTable=%x\n", pPage->GCPhys, iUser, iUserTable)); PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers); - - Log3(("pgmPoolTrackAddUser GCPhys = %RGp iUser %x iUserTable %x\n", pPage->GCPhys, iUser, iUserTable)); + Assert(iUser != NIL_PGMPOOL_IDX); # ifdef VBOX_STRICT /* @@ -2895,8 +2920,8 @@ static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUse do { Assert(i < pPool->cMaxUsers); - AssertMsg(iUser != PGMPOOL_IDX_PD || iUser != PGMPOOL_IDX_PDPT || iUser != PGMPOOL_IDX_NESTED_ROOT || iUser != PGMPOOL_IDX_AMD64_CR3 || - paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable)); + /** @todo this assertion looks odd... Shouldn't it be && here? */ + AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable)); i = paUsers[i].iNext; } while (i != NIL_PGMPOOL_USER_INDEX); } @@ -2946,15 +2971,19 @@ static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUse * @param HCPhys The HC physical address of the shadow page. * @param iUser The shadow page pool index of the user table. * @param iUserTable The index into the user table (shadowed). + * + * @remarks Don't call this for root pages. */ static void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable) { + Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable)); + PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers); + Assert(iUser != NIL_PGMPOOL_IDX); + /* * Unlink and free the specified user entry. */ - PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers); - Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable)); /* Special: For PAE and 32-bit paging, there is usually no more than one user. */ uint16_t i = pPage->iUserHead; if ( i != NIL_PGMPOOL_USER_INDEX @@ -4898,7 +4927,9 @@ int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush) * @param pPool The pool. * @param HCPhys The HC physical address of the shadow page. * @param iUser The shadow page pool index of the user table. - * @param iUserTable The index into the user table (shadowed). + * NIL_PGMPOOL_IDX for root pages. + * @param iUserTable The index into the user table (shadowed). Ignored if + * root page. 
*/ void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable) { @@ -4910,7 +4941,8 @@ void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint3 AssertReturnVoid(pPage->idx >= PGMPOOL_IDX_FIRST); /* paranoia (#6349) */ pgmLock(pVM); - pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable); + if (iUser != NIL_PGMPOOL_IDX) + pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable); if (!pPage->fCached) pgmPoolFlushPage(pPool, pPage); pgmUnlock(pVM); @@ -4932,7 +4964,7 @@ void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint3 static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser) { PVM pVM = pPool->CTX_SUFF(pVM); - LogFlow(("pgmPoolMakeMoreFreePages: iUser=%d\n", iUser)); + LogFlow(("pgmPoolMakeMoreFreePages: enmKind=%d iUser=%d\n", enmKind, iUser)); NOREF(enmKind); /* @@ -4984,8 +5016,10 @@ static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_ * @param enmKind The kind of mapping. * @param enmAccess Access type for the mapping (only relevant for big pages) * @param fA20Enabled Whether the A20 gate is enabled or not. - * @param iUser The shadow page pool index of the user table. - * @param iUserTable The index into the user table (shadowed). + * @param iUser The shadow page pool index of the user table. Root + * pages should pass NIL_PGMPOOL_IDX. + * @param iUserTable The index into the user table (shadowed). Ignored for + * root pages (iUser == NIL_PGMPOOL_IDX). * @param fLockPage Lock the page * @param ppPage Where to store the pointer to the page. NULL is stored here on failure. */ @@ -5118,7 +5152,9 @@ int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS en * @param pVM Pointer to the VM. * @param HCPhys The HC physical address of the shadow page. * @param iUser The shadow page pool index of the user table. - * @param iUserTable The index into the user table (shadowed). + * NIL_PGMPOOL_IDX if root page. + * @param iUserTable The index into the user table (shadowed). Ignored if + * root page. */ void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable) { @@ -5406,41 +5442,6 @@ void pgmR3PoolReset(PVM pVM) /* * Reinsert active pages into the hash and ensure monitoring chains are correct. */ - for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++) - { - PPGMPOOLPAGE pPage = &pPool->aPages[i]; - - /** @todo r=bird: Is this code still needed in any way? The special root - * pages should not be monitored or anything these days AFAIK. */ - Assert(pPage->iNext == NIL_PGMPOOL_IDX); - Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX); - Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX); - Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); - Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX); - Assert(!pPage->fMonitored); - - pPage->iNext = NIL_PGMPOOL_IDX; - pPage->iModifiedNext = NIL_PGMPOOL_IDX; - pPage->iModifiedPrev = NIL_PGMPOOL_IDX; - pPage->cModifications = 0; - /* ASSUMES that we're not sharing with any of the other special pages (safe for now). 
*/ - pPage->iMonitoredNext = NIL_PGMPOOL_IDX; - pPage->iMonitoredPrev = NIL_PGMPOOL_IDX; - if (pPage->fMonitored) - { - int rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, - pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage), - pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage), - pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage), - pPool->pszAccessHandler); - AssertFatalRCSuccess(rc); - pgmPoolHashInsert(pPool, pPage); - } - Assert(pPage->iUserHead == NIL_PGMPOOL_USER_INDEX); /* for now */ - Assert(pPage->iAgeNext == NIL_PGMPOOL_IDX); - Assert(pPage->iAgePrev == NIL_PGMPOOL_IDX); - } - for (VMCPUID i = 0; i < pVM->cCpus; i++) { /* @@ -5457,7 +5458,7 @@ void pgmR3PoolReset(PVM pVM) #endif /* IN_RING3 */ -#ifdef LOG_ENABLED +#if defined(LOG_ENABLED) || defined(VBOX_STRICT) /** * Stringifies a PGMPOOLKIND value. */ @@ -5528,5 +5529,5 @@ static const char *pgmPoolPoolKindToStr(uint8_t enmKind) } return "Unknown kind!"; } -#endif /* LOG_ENABLED*/ +#endif /* LOG_ENABLED || VBOX_STRICT */ diff --git a/src/VBox/VMM/VMMAll/PGMAllShw.h b/src/VBox/VMM/VMMAll/PGMAllShw.h index e56c722b..41d6ffab 100644 --- a/src/VBox/VMM/VMMAll/PGMAllShw.h +++ b/src/VBox/VMM/VMMAll/PGMAllShw.h @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2010 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -51,7 +51,6 @@ #undef SHW_PDPT_SHIFT #undef SHW_PDPT_MASK #undef SHW_PDPE_PG_MASK -#undef SHW_POOL_ROOT_IDX #if PGM_SHW_TYPE == PGM_TYPE_32BIT # define SHWPT X86PT @@ -84,7 +83,6 @@ # define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0) # define SHW_PT_SHIFT X86_PT_SHIFT # define SHW_PT_MASK X86_PT_MASK -# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD #elif PGM_SHW_TYPE == PGM_TYPE_EPT # define SHWPT EPTPT @@ -120,7 +118,6 @@ # define SHW_PDPT_MASK EPT_PDPT_MASK # define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK # define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES) -# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. 
*/ #else # define SHWPT PGMSHWPTPAE @@ -158,14 +155,12 @@ # define SHW_PDPT_MASK X86_PDPT_MASK_AMD64 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK # define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES) -# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3 # else /* 32 bits PAE mode */ # define SHW_PDPT_SHIFT X86_PDPT_SHIFT # define SHW_PDPT_MASK X86_PDPT_MASK_PAE # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES) -# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT # endif #endif @@ -298,7 +293,8 @@ PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, P else /* mapping: */ { # if PGM_SHW_TYPE == PGM_TYPE_AMD64 \ - || PGM_SHW_TYPE == PGM_TYPE_EPT + || PGM_SHW_TYPE == PGM_TYPE_EPT \ + || defined(PGM_WITHOUT_MAPPINGS) AssertFailed(); /* can't happen */ pPT = NULL; /* shut up MSC */ # else @@ -464,7 +460,7 @@ PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64 AssertRC(rc); if (RT_SUCCESS(rc)) { - Assert(fGstPte & X86_PTE_RW); + Assert((fGstPte & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */)); PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); Assert(pPage); if (pPage) @@ -478,7 +474,7 @@ PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64 SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte); # if PGM_SHW_TYPE == PGM_TYPE_EPT - HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr); + HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr); # else PGM_INVL_PG_ALL_VCPU(pVM, GCPtr); # endif diff --git a/src/VBox/VMM/VMMAll/REMAll.cpp b/src/VBox/VMM/VMMAll/REMAll.cpp index d119ab87..97498b46 100644 --- a/src/VBox/VMM/VMMAll/REMAll.cpp +++ b/src/VBox/VMM/VMMAll/REMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -223,7 +223,7 @@ VMMDECL(void) REMNotifyHandlerPhysicalFlushIfAlmostFull(PVM pVM, PVMCPU pVCpu) if (++cFree >= 48) return; } - AssertRelease(VM_FF_ISSET(pVM, VM_FF_REM_HANDLER_NOTIFY)); + AssertRelease(VM_FF_IS_SET(pVM, VM_FF_REM_HANDLER_NOTIFY)); AssertRelease(pVM->rem.s.idxPendingList != UINT32_MAX); /* Ok, we gotta flush them. */ diff --git a/src/VBox/VMM/VMMAll/SELMAll.cpp b/src/VBox/VMM/VMMAll/SELMAll.cpp index 0b9a9c6d..317727dc 100644 --- a/src/VBox/VMM/VMMAll/SELMAll.cpp +++ b/src/VBox/VMM/VMMAll/SELMAll.cpp @@ -22,9 +22,11 @@ #define LOG_GROUP LOG_GROUP_SELM #include <VBox/vmm/selm.h> #include <VBox/vmm/stam.h> +#include <VBox/vmm/em.h> #include <VBox/vmm/mm.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/pgm.h> -#include <VBox/vmm/hwaccm.h> +#include <VBox/vmm/hm.h> #include "SELMInternal.h" #include <VBox/vmm/vm.h> #include <VBox/err.h> @@ -33,6 +35,8 @@ #include <VBox/vmm/vmm.h> #include <iprt/x86.h> +#include "SELMInline.h" + /******************************************************************************* * Global Variables * @@ -60,6 +64,7 @@ static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "F VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr) { Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */ + Assert(!HMIsEnabled(pVM)); /** @todo check the limit. 
*/ X86DESC Desc; @@ -183,7 +188,6 @@ VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, return VINF_SUCCESS; } - #ifdef VBOX_WITH_RAW_MODE_NOT_R0 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); @@ -318,6 +322,7 @@ VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPT uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb) { Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */ + Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM))); /* * Deal with real & v86 mode first. @@ -484,6 +489,8 @@ VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPT static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg, RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg) { + Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM))); + /* * Try read the entry. */ @@ -537,6 +544,7 @@ VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUM PVM pVM = pVCpu->CTX_SUFF(pVM); Assert(pVM->cCpus == 1); + Assert(!HMIsEnabled(pVM)); /* @@ -630,6 +638,8 @@ DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL PRTGCPTR ppvFlat, uint32_t *pcBits) { NOREF(pVCpu); + Assert(!HMIsEnabled(pVM)); + /** @todo validate limit! */ X86DESC Desc; if (!(SelCS & X86_SEL_LDT)) @@ -714,40 +724,27 @@ DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, R if ( pSRegCS->Attr.n.u1DescType == 1 && (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE)) { + /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 + (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */ + if ( pSRegCS->Attr.n.u1Long + && CPUMIsGuestInLongMode(pVCpu)) + { + *ppvFlat = Addr; + return VINF_SUCCESS; + } + /* - * Check level. + * Limit check. Note that the limit in the hidden register is the + * final value. The granularity bit was included in its calculation. */ - unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL); - if ( !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF) - ? uLevel <= pSRegCS->Attr.n.u2Dpl - : uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */ - ) + uint32_t u32Limit = pSRegCS->u32Limit; + if ((RTGCUINTPTR)Addr <= u32Limit) { - /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 - (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */ - if ( pSRegCS->Attr.n.u1Long - && CPUMIsGuestInLongMode(pVCpu)) - { - *ppvFlat = Addr; - return VINF_SUCCESS; - } - - /* - * Limit check. Note that the limit in the hidden register is the - * final value. The granularity bit was included in its calculation. - */ - uint32_t u32Limit = pSRegCS->u32Limit; - if ((RTGCUINTPTR)Addr <= u32Limit) - { - *ppvFlat = Addr + pSRegCS->u64Base; - return VINF_SUCCESS; - } - - return VERR_OUT_OF_SELECTOR_BOUNDS; + *ppvFlat = Addr + pSRegCS->u64Base; + return VINF_SUCCESS; } - Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n", - pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl)); - return VERR_INVALID_RPL; + + return VERR_OUT_OF_SELECTOR_BOUNDS; } return VERR_NOT_CODE_SELECTOR; } @@ -785,10 +782,10 @@ VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL Sel CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS); /* Undo ring compression. 
*/ - if ((SelCPL & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM))) SelCPL &= ~X86_SEL_RPL; Assert(pSRegCS->Sel == SelCS); - if ((SelCS & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))) + if ((SelCS & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM))) SelCS &= ~X86_SEL_RPL; #else Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS)); @@ -832,12 +829,31 @@ VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP) */ void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp) { + Assert(!HMIsEnabled(pVM)); Assert((ss & 1) || esp == 0); pVM->selm.s.Tss.ss1 = ss; pVM->selm.s.Tss.esp1 = (uint32_t)esp; } +#ifdef VBOX_WITH_RAW_RING1 +/** + * Sets ss:esp for ring1 in main Hypervisor's TSS. + * + * @param pVM Pointer to the VM. + * @param ss Ring2 SS register value. Pass 0 if invalid. + * @param esp Ring2 ESP register value. + */ +void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp) +{ + Assert(!HMIsEnabled(pVM)); + Assert((ss & 3) == 2 || esp == 0); + pVM->selm.s.Tss.ss2 = ss; + pVM->selm.s.Tss.esp2 = (uint32_t)esp; +} +#endif + + #ifdef VBOX_WITH_RAW_MODE_NOT_R0 /** * Gets ss:esp for ring1 in main Hypervisor's TSS. @@ -851,11 +867,14 @@ void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp) */ VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp) { + Assert(!HMIsEnabled(pVM)); Assert(pVM->cCpus == 1); PVMCPU pVCpu = &pVM->aCpus[0]; +#ifdef SELM_TRACK_GUEST_TSS_CHANGES if (pVM->selm.s.fSyncTSSRing0Stack) { +#endif RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss; int rc; VBOXTSS tss; @@ -912,7 +931,9 @@ l_tryagain: /* Update our TSS structure for the guest's ring 1 stack */ selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0); pVM->selm.s.fSyncTSSRing0Stack = false; +#ifdef SELM_TRACK_GUEST_TSS_CHANGES } +#endif *pSS = pVM->selm.s.Tss.ss1; *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1; @@ -922,18 +943,7 @@ l_tryagain: #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ -/** - * Returns Guest TSS pointer - * - * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored. - * @param pVM Pointer to the VM. - */ -VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM) -{ - return (RTGCPTR)pVM->selm.s.GCPtrGuestTss; -} - -#ifdef VBOX_WITH_RAW_MODE_NOT_R0 +#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) /** * Gets the hypervisor code selector (CS). @@ -1007,7 +1017,7 @@ VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM) return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3); } -#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ +#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */ /** * Gets info about the current TSS. @@ -1054,7 +1064,7 @@ VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGC */ VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu) { - /** @todo SMP support!! */ + /** @todo SMP support!! (64-bit guest scenario, primarily) */ pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu); pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); } diff --git a/src/VBox/VMM/VMMAll/TMAll.cpp b/src/VBox/VMM/VMMAll/TMAll.cpp index 41eff339..a1167a12 100644 --- a/src/VBox/VMM/VMMAll/TMAll.cpp +++ b/src/VBox/VMM/VMMAll/TMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2011 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -74,18 +74,6 @@ /** - * Gets the current warp drive percent. - * - * @returns The warp drive percent. - * @param pVM Pointer to the VM. - */ -VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM) -{ - return pVM->tm.s.u32VirtualWarpDrivePercentage; -} - - -/** * Notification that execution is about to start. * * This call must always be paired with a TMNotifyEndOfExecution call. @@ -108,7 +96,7 @@ VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu) /** - * Notification that execution is about to start. + * Notification that execution has ended. * * This call must always be paired with a TMNotifyStartOfExecution call. * @@ -230,7 +218,7 @@ VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu) DECLINLINE(void) tmScheduleNotify(PVM pVM) { PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; - if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) { Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__)); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); @@ -754,7 +742,7 @@ DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t /* * Return straight away if the timer FF is already set ... */ - if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet); /* @@ -773,9 +761,9 @@ DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t const int64_t i64Delta1 = u64Expire1 - u64Now; if (i64Delta1 <= 0) { - if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) { - Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #if defined(IN_RING3) && defined(VBOX_WITH_REM) REMR3NotifyTimerPending(pVM, pVCpuDst); @@ -819,9 +807,9 @@ DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t } if ( !pVM->tm.s.fRunningQueues - && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) { - Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #if defined(IN_RING3) && defined(VBOX_WITH_REM) REMR3NotifyTimerPending(pVM, pVCpuDst); @@ -894,7 +882,7 @@ DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t break; /* Got an consistent offset */ /* Repeat the initial checks before iterating. 
*/ - if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet); if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues)) { @@ -918,9 +906,9 @@ DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t if (i64Delta2 <= 0) { if ( !pVM->tm.s.fRunningQueues - && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) { - Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #if defined(IN_RING3) && defined(VBOX_WITH_REM) REMR3NotifyTimerPending(pVM, pVCpuDst); diff --git a/src/VBox/VMM/VMMAll/TMAllCpu.cpp b/src/VBox/VMM/VMMAll/TMAllCpu.cpp index f41adfa4..7dc770cc 100644 --- a/src/VBox/VMM/VMMAll/TMAllCpu.cpp +++ b/src/VBox/VMM/VMMAll/TMAllCpu.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -320,8 +320,10 @@ DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers) else u64 = ASMReadTSC(); - /* Never return a value lower than what the guest has already seen. */ - if (u64 < pVCpu->tm.s.u64TSCLastSeen) + /* Always return a value higher than what the guest has already seen. */ + if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen)) + pVCpu->tm.s.u64TSCLastSeen = u64; + else { STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow); pVCpu->tm.s.u64TSCLastSeen += 64; /* @todo choose a good increment here */ diff --git a/src/VBox/VMM/VMMAll/TMAllReal.cpp b/src/VBox/VMM/VMMAll/TMAllReal.cpp index 7c2e5a72..a68a441c 100644 --- a/src/VBox/VMM/VMMAll/TMAllReal.cpp +++ b/src/VBox/VMM/VMMAll/TMAllReal.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/TMAllVirtual.cpp b/src/VBox/VMM/VMMAll/TMAllVirtual.cpp index 0fb69fb9..23901768 100644 --- a/src/VBox/VMM/VMMAll/TMAllVirtual.cpp +++ b/src/VBox/VMM/VMMAll/TMAllVirtual.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -337,7 +337,7 @@ DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers) if (fCheckTimers) { PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; - if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER) + if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER) && !pVM->tm.s.fRunningQueues && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64 || ( pVM->tm.s.fVirtualSyncTicking @@ -348,7 +348,7 @@ DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers) ) { STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF); - Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #ifdef IN_RING3 # ifdef VBOX_WITH_REM @@ -505,7 +505,7 @@ DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); - Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp)); PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); @@ -593,7 +593,7 @@ DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64, uint64_t *pcN VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); - Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp)); PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); @@ -648,7 +648,7 @@ DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers, uint64_t *pc if (fCheckTimers) { PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; - if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER) + if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER) && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64) { Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__)); @@ -818,9 +818,9 @@ DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers, uint64_t *pc if (u64 >= u64Expire) { PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; - if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) + if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)) { - Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER))); + Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER))); VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */ VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #ifdef IN_RING3 diff --git a/src/VBox/VMM/VMMAll/TRPMAll.cpp b/src/VBox/VMM/VMMAll/TRPMAll.cpp index b7a93301..f2b74cb7 100644 --- a/src/VBox/VMM/VMMAll/TRPMAll.cpp +++ b/src/VBox/VMM/VMMAll/TRPMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. 
This file is free software; @@ -23,9 +23,11 @@ #include <VBox/vmm/trpm.h> #include <VBox/vmm/pgm.h> #include <VBox/vmm/mm.h> +#include <VBox/vmm/hm.h> #include <VBox/vmm/patm.h> #include <VBox/vmm/selm.h> #include <VBox/vmm/stam.h> +#include <VBox/vmm/dbgf.h> #include "TRPMInternal.h" #include <VBox/vmm/vm.h> #include <VBox/err.h> @@ -47,9 +49,9 @@ * @returns VBox status code. * @param pVCpu Pointer to the VMCPU. * @param pu8TrapNo Where to store the trap number. - * @param pEnmType Where to store the trap type + * @param penmType Where to store the trap type */ -VMMDECL(int) TRPMQueryTrap(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnmType) +VMMDECL(int) TRPMQueryTrap(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *penmType) { /* * Check if we have a trap at present. @@ -58,8 +60,8 @@ VMMDECL(int) TRPMQueryTrap(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnmTyp { if (pu8TrapNo) *pu8TrapNo = (uint8_t)pVCpu->trpm.s.uActiveVector; - if (pEnmType) - *pEnmType = pVCpu->trpm.s.enmActiveType; + if (penmType) + *penmType = pVCpu->trpm.s.enmActiveType; return VINF_SUCCESS; } @@ -76,7 +78,7 @@ VMMDECL(int) TRPMQueryTrap(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnmTyp * @returns The current trap number. * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(uint8_t) TRPMGetTrapNo(PVMCPU pVCpu) +VMMDECL(uint8_t) TRPMGetTrapNo(PVMCPU pVCpu) { AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); return (uint8_t)pVCpu->trpm.s.uActiveVector; @@ -92,19 +94,19 @@ VMMDECL(uint8_t) TRPMGetTrapNo(PVMCPU pVCpu) * @returns Error code. * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(RTGCUINT) TRPMGetErrorCode(PVMCPU pVCpu) +VMMDECL(RTGCUINT) TRPMGetErrorCode(PVMCPU pVCpu) { AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); #ifdef VBOX_STRICT switch (pVCpu->trpm.s.uActiveVector) { - case 0x0a: - case 0x0b: - case 0x0c: - case 0x0d: - case 0x0e: - case 0x11: - case 0x08: + case X86_XCPT_TS: + case X86_XCPT_NP: + case X86_XCPT_SS: + case X86_XCPT_GP: + case X86_XCPT_PF: + case X86_XCPT_AC: + case X86_XCPT_DF: break; default: AssertMsgFailed(("This trap (%#x) doesn't have any error code\n", pVCpu->trpm.s.uActiveVector)); @@ -127,12 +129,29 @@ VMMDECL(RTGCUINT) TRPMGetErrorCode(PVMCPU pVCpu) VMMDECL(RTGCUINTPTR) TRPMGetFaultAddress(PVMCPU pVCpu) { AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); - AssertMsg(pVCpu->trpm.s.uActiveVector == 0xe, ("Not trap 0e!\n")); + AssertMsg(pVCpu->trpm.s.uActiveVector == X86_XCPT_PF, ("Not page-fault trap!\n")); return pVCpu->trpm.s.uActiveCR2; } /** + * Gets the instruction-length for the current trap (only relevant for software + * interrupts and software exceptions #BP and #OF). + * + * The caller is responsible for making sure there is an active trap 0x0e when + * making this request. + * + * @returns Fault address associated with the trap. + * @param pVCpu Pointer to the VMCPU. + */ +VMMDECL(uint8_t) TRPMGetInstrLength(PVMCPU pVCpu) +{ + AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); + return pVCpu->trpm.s.cbInstr; +} + + +/** * Clears the current active trap/exception/interrupt. * * The caller is responsible for making sure there is an active trap @@ -171,7 +190,7 @@ VMMDECL(int) TRPMResetTrap(PVMCPU pVCpu) * @param u8TrapNo The trap vector to assert. * @param enmType Trap type. 
*/ -VMMDECL(int) TRPMAssertTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType) +VMMDECL(int) TRPMAssertTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType) { Log2(("TRPMAssertTrap: u8TrapNo=%02x type=%d\n", u8TrapNo, enmType)); @@ -188,6 +207,40 @@ VMMDECL(int) TRPMAssertTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType) pVCpu->trpm.s.enmActiveType = enmType; pVCpu->trpm.s.uActiveErrorCode = ~(RTGCUINT)0; pVCpu->trpm.s.uActiveCR2 = 0xdeadface; + pVCpu->trpm.s.cbInstr = UINT8_MAX; + return VINF_SUCCESS; +} + + +/** + * Assert a page-fault exception. + * + * The caller is responsible for making sure there is no active trap + * when making this request. + * + * @returns VBox status code. + * @param pVCpu Pointer to the VMCPU. + * @param uCR2 The new fault address. + * @param uErrorCode The error code for the page-fault. + */ +VMMDECL(int) TRPMAssertXcptPF(PVMCPU pVCpu, RTGCUINTPTR uCR2, RTGCUINT uErrorCode) +{ + Log2(("TRPMAssertXcptPF: uCR2=%RGv uErrorCode=%RGv\n", uCR2, uErrorCode)); /** @todo RTGCUINT to be fixed. */ + + /* + * Cannot assert a trap when one is already active. + */ + if (pVCpu->trpm.s.uActiveVector != ~0U) + { + AssertMsgFailed(("CPU%d: Active trap %#x\n", pVCpu->idCpu, pVCpu->trpm.s.uActiveVector)); + return VERR_TRPM_ACTIVE_TRAP; + } + + pVCpu->trpm.s.uActiveVector = X86_XCPT_PF; + pVCpu->trpm.s.enmActiveType = TRPM_TRAP; + pVCpu->trpm.s.uActiveErrorCode = uErrorCode; + pVCpu->trpm.s.uActiveCR2 = uCR2; + pVCpu->trpm.s.cbInstr = UINT8_MAX; return VINF_SUCCESS; } @@ -202,7 +255,7 @@ VMMDECL(int) TRPMAssertTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType) * @param pVCpu Pointer to the VMCPU. * @param uErrorCode The new error code. */ -VMMDECL(void) TRPMSetErrorCode(PVMCPU pVCpu, RTGCUINT uErrorCode) +VMMDECL(void) TRPMSetErrorCode(PVMCPU pVCpu, RTGCUINT uErrorCode) { Log2(("TRPMSetErrorCode: uErrorCode=%RGv\n", uErrorCode)); /** @todo RTGCUINT mess! */ AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); @@ -210,10 +263,10 @@ VMMDECL(void) TRPMSetErrorCode(PVMCPU pVCpu, RTGCUINT uErrorCode) #ifdef VBOX_STRICT switch (pVCpu->trpm.s.uActiveVector) { - case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: + case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP: case X86_XCPT_PF: AssertMsg(uErrorCode != ~(RTGCUINT)0, ("Invalid uErrorCode=%#x u8TrapNo=%d\n", uErrorCode, pVCpu->trpm.s.uActiveVector)); break; - case 0x11: case 0x08: + case X86_XCPT_AC: case X86_XCPT_DF: AssertMsg(uErrorCode == 0, ("Invalid uErrorCode=%#x u8TrapNo=%d\n", uErrorCode, pVCpu->trpm.s.uActiveVector)); break; default: @@ -225,8 +278,8 @@ VMMDECL(void) TRPMSetErrorCode(PVMCPU pVCpu, RTGCUINT uErrorCode) /** - * Sets the error code of the current trap. - * (This function is for use in trap handlers and such.) + * Sets the fault address of the current #PF trap. (This function is for use in + * trap handlers and such.) * * The caller is responsible for making sure there is an active trap 0e * when making this request. @@ -234,16 +287,39 @@ VMMDECL(void) TRPMSetErrorCode(PVMCPU pVCpu, RTGCUINT uErrorCode) * @param pVCpu Pointer to the VMCPU. * @param uCR2 The new fault address (cr2 register). 
*/ -VMMDECL(void) TRPMSetFaultAddress(PVMCPU pVCpu, RTGCUINTPTR uCR2) +VMMDECL(void) TRPMSetFaultAddress(PVMCPU pVCpu, RTGCUINTPTR uCR2) { Log2(("TRPMSetFaultAddress: uCR2=%RGv\n", uCR2)); AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); - AssertMsg(pVCpu->trpm.s.uActiveVector == 0xe, ("Not trap 0e!\n")); + AssertMsg(pVCpu->trpm.s.uActiveVector == X86_XCPT_PF, ("Not trap 0e!\n")); pVCpu->trpm.s.uActiveCR2 = uCR2; } /** + * Sets the instruction-length of the current trap (relevant for software + * interrupts and software exceptions like #BP, #OF). + * + * The caller is responsible for making sure there is an active trap 0e + * when making this request. + * + * @param pVCpu Pointer to the VMCPU. + * @param cbInstr The instruction length. + */ +VMMDECL(void) TRPMSetInstrLength(PVMCPU pVCpu, uint8_t cbInstr) +{ + Log2(("TRPMSetInstrLength: cbInstr=%u\n", cbInstr)); + AssertMsg(pVCpu->trpm.s.uActiveVector != ~0U, ("No active trap!\n")); + AssertMsg( pVCpu->trpm.s.enmActiveType == TRPM_SOFTWARE_INT + || ( pVCpu->trpm.s.enmActiveType == TRPM_TRAP + && ( pVCpu->trpm.s.uActiveVector == X86_XCPT_BP + || pVCpu->trpm.s.uActiveVector == X86_XCPT_OF)), + ("Invalid trap type %#x\n", pVCpu->trpm.s.enmActiveType)); + pVCpu->trpm.s.cbInstr = cbInstr; +} + + +/** * Checks if the current active trap/interrupt/exception/fault/whatever is a software * interrupt or not. * @@ -267,7 +343,7 @@ VMMDECL(bool) TRPMIsSoftwareInterrupt(PVMCPU pVCpu) * @returns true if trap active, false if not. * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(bool) TRPMHasTrap(PVMCPU pVCpu) +VMMDECL(bool) TRPMHasTrap(PVMCPU pVCpu) { return pVCpu->trpm.s.uActiveVector != ~0U; } @@ -284,8 +360,11 @@ VMMDECL(bool) TRPMHasTrap(PVMCPU pVCpu) * @param puErrorCode Where to store the error code associated with some traps. * ~0U is stored if the trap has no error code. * @param puCR2 Where to store the CR2 associated with a trap 0E. + * @param pcbInstr Where to store the instruction-length + * associated with some traps. */ -VMMDECL(int) TRPMQueryTrapAll(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnmType, PRTGCUINT puErrorCode, PRTGCUINTPTR puCR2) +VMMDECL(int) TRPMQueryTrapAll(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnmType, PRTGCUINT puErrorCode, PRTGCUINTPTR puCR2, + uint8_t *pcbInstr) { /* * Check if we have a trap at present. 
@@ -301,7 +380,8 @@ VMMDECL(int) TRPMQueryTrapAll(PVMCPU pVCpu, uint8_t *pu8TrapNo, TRPMEVENT *pEnm *puErrorCode = pVCpu->trpm.s.uActiveErrorCode; if (puCR2) *puCR2 = pVCpu->trpm.s.uActiveCR2; - + if (pcbInstr) + *pcbInstr = pVCpu->trpm.s.cbInstr; return VINF_SUCCESS; } @@ -321,6 +401,7 @@ VMMDECL(void) TRPMSaveTrap(PVMCPU pVCpu) pVCpu->trpm.s.enmSavedType = pVCpu->trpm.s.enmActiveType; pVCpu->trpm.s.uSavedErrorCode = pVCpu->trpm.s.uActiveErrorCode; pVCpu->trpm.s.uSavedCR2 = pVCpu->trpm.s.uActiveCR2; + pVCpu->trpm.s.cbSavedInstr = pVCpu->trpm.s.cbInstr; } @@ -337,6 +418,7 @@ VMMDECL(void) TRPMRestoreTrap(PVMCPU pVCpu) pVCpu->trpm.s.enmActiveType = pVCpu->trpm.s.enmSavedType; pVCpu->trpm.s.uActiveErrorCode = pVCpu->trpm.s.uSavedErrorCode; pVCpu->trpm.s.uActiveCR2 = pVCpu->trpm.s.uSavedCR2; + pVCpu->trpm.s.cbInstr = pVCpu->trpm.s.cbSavedInstr; } @@ -360,6 +442,7 @@ VMMDECL(void) TRPMRestoreTrap(PVMCPU pVCpu) VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGate, uint32_t cbInstr, TRPMERRORCODE enmError, TRPMEVENT enmType, int32_t iOrgTrap) { + AssertReturn(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)), VERR_TRPM_HM_IPE); #ifdef TRPM_FORWARD_TRAPS_IN_GC PVM pVM = pVCpu->CTX_SUFF(pVM); X86EFLAGS eflags; @@ -410,7 +493,7 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat eflags.u32 = CPUMRawGetEFlags(pVCpu); /* VMCPU_FF_INHIBIT_INTERRUPTS should be cleared upfront or don't call this function at all for dispatching hardware interrupts. */ - Assert(enmType != TRPM_HARDWARE_INT || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); + Assert(enmType != TRPM_HARDWARE_INT || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); /* * If it's a real guest trap and the guest's page fault handler is marked as safe for GC execution, then we call it directly. @@ -433,7 +516,7 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat int rc; Assert(PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)); - Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); + Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); if (GCPtrIDT && iGate * sizeof(VBOXIDTE) >= cbIDT) goto failure; @@ -592,7 +675,7 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)pTrapStackGC - 10*sizeof(uint32_t), 10 * sizeof(uint32_t), X86_PTE_RW); pTrapStack = (uint32_t *)(uintptr_t)pTrapStackGC; #else - Assert(eflags.Bits.u1VM || (pRegFrame->ss.Sel & X86_SEL_RPL) == 0 || (pRegFrame->ss.Sel & X86_SEL_RPL) == 3); + Assert(eflags.Bits.u1VM || (pRegFrame->ss.Sel & X86_SEL_RPL) == 0 || (pRegFrame->ss.Sel & X86_SEL_RPL) == 3 || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1)); /* Check maximum amount we need (10 when executing in V86 mode) */ if ((pTrapStackGC >> PAGE_SHIFT) != ((pTrapStackGC - 10*sizeof(uint32_t)) >> PAGE_SHIFT)) /* fail if we cross a page boundary */ goto failure; @@ -623,9 +706,15 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat if (!fConforming && dpl < cpl) { - if ((pRegFrame->ss.Sel & X86_SEL_RPL) == 1 && !eflags.Bits.u1VM) - pTrapStack[--idx] = pRegFrame->ss.Sel & ~1; /* Mask away traces of raw ring execution (ring 1). */ +#ifdef IN_RC /* Only in RC we still see tracing of our ring modifications. 
*/ + if ( (pRegFrame->ss.Sel & X86_SEL_RPL) == 1 + && !eflags.Bits.u1VM) + pTrapStack[--idx] = pRegFrame->ss.Sel & ~1; /* Mask away traces of raw ring 0 execution (ring 1). */ + else if ( EMIsRawRing1Enabled(pVM) + && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2) + pTrapStack[--idx] = (pRegFrame->ss.Sel & ~2) | 1; /* Mask away traces of raw ring 1 execution (ring 2). */ else +#endif /* IN_RC */ pTrapStack[--idx] = pRegFrame->ss.Sel; pTrapStack[--idx] = pRegFrame->esp; @@ -635,9 +724,15 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat /* Note: Not really necessary as we grab include those bits in the trap/irq handler trampoline */ pTrapStack[--idx] = eflags.u32; - if ((pRegFrame->cs.Sel & X86_SEL_RPL) == 1 && !eflags.Bits.u1VM) - pTrapStack[--idx] = pRegFrame->cs.Sel & ~1; /* Mask away traces of raw ring execution (ring 1). */ +#ifdef IN_RC /* Only in RC mode we still see tracing of our ring modifications */ + if ( (pRegFrame->cs.Sel & X86_SEL_RPL) == 1 + && !eflags.Bits.u1VM) + pTrapStack[--idx] = pRegFrame->cs.Sel & ~1; /* Mask away traces of raw ring execution (ring 1). */ + else if ( EMIsRawRing1Enabled(pVM) + && (pRegFrame->cs.Sel & X86_SEL_RPL) == 2) + pTrapStack[--idx] = (pRegFrame->cs.Sel & ~2) | 1; /* Mask away traces of raw ring 1 execution (ring 2). */ else +#endif /* IN_RC */ pTrapStack[--idx] = pRegFrame->cs.Sel; if (enmType == TRPM_SOFTWARE_INT) @@ -659,6 +754,10 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat /* Mask away dangerous flags for the trap/interrupt handler. */ eflags.u32 &= ~(X86_EFL_TF | X86_EFL_VM | X86_EFL_RF | X86_EFL_NT); +#ifdef DEBUG + if (DBGFIsStepping(pVCpu)) + eflags.u32 |= X86_EFL_TF; +#endif /* Turn off interrupts for interrupt gates. */ if (GuestIdte.Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32) @@ -668,7 +767,7 @@ VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGat #ifdef DEBUG for (int j = idx; j < 0; j++) - Log4(("Stack %RRv pos %02d: %08x\n", &pTrapStack[j], j, pTrapStack[j])); + LogFlow(("Stack %RRv pos %02d: %08x\n", &pTrapStack[j], j, pTrapStack[j])); Log4(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" "eip=%08x esp=%08x ebp=%08x iopl=%d\n" @@ -770,6 +869,7 @@ VMMDECL(int) TRPMRaiseXcpt(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, X86XCPT enmXcpt) pVCpu->trpm.s.enmActiveType = TRPM_TRAP; pVCpu->trpm.s.uActiveErrorCode = 0xdeadbeef; pVCpu->trpm.s.uActiveCR2 = 0xdeadface; + pVCpu->trpm.s.cbInstr = UINT8_MAX; return VINF_EM_RAW_GUEST_TRAP; } @@ -797,6 +897,7 @@ VMMDECL(int) TRPMRaiseXcptErr(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, X86XCPT enmXc pVCpu->trpm.s.enmActiveType = TRPM_TRAP; pVCpu->trpm.s.uActiveErrorCode = uErr; pVCpu->trpm.s.uActiveCR2 = 0xdeadface; + pVCpu->trpm.s.cbInstr = UINT8_MAX; return VINF_EM_RAW_GUEST_TRAP; } @@ -825,10 +926,12 @@ VMMDECL(int) TRPMRaiseXcptErrCR2(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, X86XCPT en pVCpu->trpm.s.enmActiveType = TRPM_TRAP; pVCpu->trpm.s.uActiveErrorCode = uErr; pVCpu->trpm.s.uActiveCR2 = uCR2; + pVCpu->trpm.s.cbInstr = UINT8_MAX; return VINF_EM_RAW_GUEST_TRAP; } +#ifdef VBOX_WITH_RAW_MODE /** * Clear guest trap/interrupt gate handler * @@ -838,23 +941,18 @@ VMMDECL(int) TRPMRaiseXcptErrCR2(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, X86XCPT en */ VMMDECL(int) trpmClearGuestTrapHandler(PVM pVM, unsigned iTrap) { - /* - * Validate. 
- */ - if (iTrap >= RT_ELEMENTS(pVM->trpm.s.aIdt)) - { - AssertMsg(iTrap < TRPM_HANDLER_INT_BASE, ("Illegal gate number %d!\n", iTrap)); - return VERR_INVALID_PARAMETER; - } + AssertReturn(!HMIsEnabled(pVM), VERR_TRPM_HM_IPE); + AssertMsgReturn(iTrap < RT_ELEMENTS(pVM->trpm.s.aIdt), ("Illegal gate number %d!\n", iTrap), VERR_INVALID_PARAMETER); if (ASMBitTest(&pVM->trpm.s.au32IdtPatched[0], iTrap)) -#ifdef IN_RING3 +# ifdef IN_RING3 trpmR3ClearPassThroughHandler(pVM, iTrap); -#else +# else AssertFailed(); -#endif +# endif pVM->trpm.s.aGuestTrapHandler[iTrap] = TRPM_INVALID_HANDLER; return VINF_SUCCESS; } +#endif /* VBOX_WITH_RAW_MODE */ diff --git a/src/VBox/VMM/VMMAll/VMAll.cpp b/src/VBox/VMM/VMMAll/VMAll.cpp index 15740626..2d6a5ad9 100644 --- a/src/VBox/VMM/VMMAll/VMAll.cpp +++ b/src/VBox/VMM/VMMAll/VMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2007 Oracle Corporation + * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; diff --git a/src/VBox/VMM/VMMAll/VMMAll.cpp b/src/VBox/VMM/VMMAll/VMMAll.cpp index 583991ce..09193153 100644 --- a/src/VBox/VMM/VMMAll/VMMAll.cpp +++ b/src/VBox/VMM/VMMAll/VMMAll.cpp @@ -4,7 +4,7 @@ */ /* - * Copyright (C) 2006-2012 Oracle Corporation + * Copyright (C) 2006-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; @@ -93,9 +93,9 @@ static DECLCALLBACK(size_t) vmmFormatTypeVmCpuSet(PFNRTSTROUTPUT pfnOutput, void cCpus = 0; } if (cCpus == 0) - return pfnOutput(pvArgOutput, "<empty>", sizeof("<empty>") - 1); + return pfnOutput(pvArgOutput, RT_STR_TUPLE("<empty>")); if (cCpus == RT_ELEMENTS(pSet->au32Bitmap) * 32) - return pfnOutput(pvArgOutput, "<full>", sizeof("<full>") - 1); + return pfnOutput(pvArgOutput, RT_STR_TUPLE("<full>")); /* * Print cpus that are present: {1,2,7,9 ... } @@ -173,7 +173,7 @@ void vmmTermFormatTypes(void) * @returns bottom of the stack. * @param pVCpu Pointer to the VMCPU. */ -VMMDECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu) +VMM_INT_DECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu) { return (RTRCPTR)pVCpu->vmm.s.pbEMTStackBottomRC; } @@ -185,6 +185,7 @@ VMMDECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu) * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT. * * @param pVM Pointer to the VM. + * @internal */ VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM) { @@ -198,16 +199,21 @@ VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM) /* Search first by host cpu id (most common case) * and then by native thread id (page fusion case). */ - /* RTMpCpuId had better be cheap. */ - RTCPUID idHostCpu = RTMpCpuId(); - - /** @todo optimize for large number of VCPUs when that becomes more common. */ - for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD)) { - PVMCPU pVCpu = &pVM->aCpus[idCpu]; + /** @todo r=ramshankar: This doesn't buy us anything in terms of performance + * leaving it here for hysterical raisins and as a reference if we + * implemented a hashing approach in the future. */ + RTCPUID idHostCpu = RTMpCpuId(); - if (pVCpu->idHostCpu == idHostCpu) - return pVCpu->idCpu; + /** @todo optimize for large number of VCPUs when that becomes more common. */ + for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + { + PVMCPU pVCpu = &pVM->aCpus[idCpu]; + + if (pVCpu->idHostCpu == idHostCpu) + return pVCpu->idCpu; + } } /* RTThreadGetNativeSelf had better be cheap. 
*/ @@ -236,6 +242,7 @@ VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM) * @returns The VMCPU pointer. NULL if not an EMT. * * @param pVM Pointer to the VM. + * @internal */ VMMDECL(PVMCPU) VMMGetCpu(PVM pVM) { @@ -250,20 +257,25 @@ VMMDECL(PVMCPU) VMMGetCpu(PVM pVM) if (pVM->cCpus == 1) return &pVM->aCpus[0]; - /* Search first by host cpu id (most common case) + /* + * Search first by host cpu id (most common case) * and then by native thread id (page fusion case). */ - - /* RTMpCpuId had better be cheap. */ - RTCPUID idHostCpu = RTMpCpuId(); - - /** @todo optimize for large number of VCPUs when that becomes more common. */ - for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD)) { - PVMCPU pVCpu = &pVM->aCpus[idCpu]; + /** @todo r=ramshankar: This doesn't buy us anything in terms of performance + * leaving it here for hysterical raisins and as a reference if we + * implemented a hashing approach in the future. */ + RTCPUID idHostCpu = RTMpCpuId(); - if (pVCpu->idHostCpu == idHostCpu) - return pVCpu; + /** @todo optimize for large number of VCPUs when that becomes more common. */ + for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) + { + PVMCPU pVCpu = &pVM->aCpus[idCpu]; + + if (pVCpu->idHostCpu == idHostCpu) + return pVCpu; + } } /* RTThreadGetNativeSelf had better be cheap. */ @@ -290,6 +302,7 @@ VMMDECL(PVMCPU) VMMGetCpu(PVM pVM) * * @returns The VMCPU pointer. * @param pVM Pointer to the VM. + * @internal */ VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM) { @@ -305,6 +318,7 @@ VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM) * * @param pVM Pointer to the VM. * @param idCpu The ID of the virtual CPU. + * @internal */ VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, RTCPUID idCpu) { @@ -321,7 +335,7 @@ VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, RTCPUID idCpu) * * @returns VBOX_SVN_REV. */ -VMMDECL(uint32_t) VMMGetSvnRev(void) +VMM_INT_DECL(uint32_t) VMMGetSvnRev(void) { return VBOX_SVN_REV; } @@ -333,8 +347,43 @@ VMMDECL(uint32_t) VMMGetSvnRev(void) * @returns active switcher * @param pVM Pointer to the VM. */ -VMMDECL(VMMSWITCHER) VMMGetSwitcher(PVM pVM) +VMM_INT_DECL(VMMSWITCHER) VMMGetSwitcher(PVM pVM) { return pVM->vmm.s.enmSwitcher; } + +/** + * Checks whether we're in a ring-3 call or not. + * + * @returns true / false. + * @param pVCpu The caller's cross context VM structure. + * @thread EMT + */ +VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu) +{ +#ifdef RT_ARCH_X86 + return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call; +#else + return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call; +#endif +} + + +/** + * Returns the build type for matching components. + * + * @returns Build type value. + */ +uint32_t vmmGetBuildType(void) +{ + uint32_t uRet = 0xbeef0000; +#ifdef DEBUG + uRet |= RT_BIT_32(0); +#endif +#ifdef VBOX_WITH_STATISTICS + uRet |= RT_BIT_32(1); +#endif + return uRet; +} + diff --git a/src/VBox/VMM/VMMAll/VMMAllA.asm b/src/VBox/VMM/VMMAll/VMMAllA.asm index dae37ada..7d07774c 100644 --- a/src/VBox/VMM/VMMAll/VMMAllA.asm +++ b/src/VBox/VMM/VMMAll/VMMAllA.asm @@ -4,7 +4,7 @@ ; ; -; Copyright (C) 2009 Oracle Corporation +; Copyright (C) 2009-2010 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; |
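
The standalone C sketches below illustrate some of the logic changed in the hunks above; every SKETCH*/sketch* name, constant and status value in them is an invented stand-in for illustration, not a VirtualBox type or API. First, the reworked selmValidateAndConvertCSAddrHidden() path in SELMAll.cpp: the privilege-level (RPL/DPL) check is dropped, keeping only the long-mode shortcut and the limit check. A minimal sketch of that flat-address conversion, assuming a reduced stand-in for the hidden CS register rather than the real CPUMSELREG layout:

#include <stdint.h>

#define SKETCH_VINF_SUCCESS                  0
#define SKETCH_VERR_OUT_OF_SELECTOR_BOUNDS  (-1)   /* stand-in status code */

/* Reduced stand-in for the hidden CS register: only what the conversion needs. */
typedef struct SKETCHCSHID
{
    uint64_t u64Base;     /* segment base from the hidden register            */
    uint32_t u32Limit;    /* final limit; the granularity bit already applied */
    int      fLong;       /* CS.L set and the guest is in long mode           */
} SKETCHCSHID;

/* In 64-bit mode CS/DS/ES/SS behave as if the base were 0, so the address is
 * already flat; otherwise check the limit and add the base. */
int sketchCsAddrToFlat(const SKETCHCSHID *pCS, uint64_t uAddr, uint64_t *puFlat)
{
    if (pCS->fLong)
    {
        *puFlat = uAddr;
        return SKETCH_VINF_SUCCESS;
    }
    if (uAddr <= pCS->u32Limit)
    {
        *puFlat = uAddr + pCS->u64Base;
        return SKETCH_VINF_SUCCESS;
    }
    return SKETCH_VERR_OUT_OF_SELECTOR_BOUNDS;
}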
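
The TMAll.cpp and TMAllVirtual.cpp hunks mostly rename VMCPU_FF_ISSET/VMCPU_FF_ISPENDING to VMCPU_FF_IS_SET/VMCPU_FF_IS_PENDING, but the underlying pattern they operate on is the same check-then-set on the per-VCPU force-action flags. A simplified sketch of that pattern; the flag value and struct are stand-ins, and the real macros use atomic bit operations:

#include <stdint.h>

#define SKETCH_FF_TIMER  UINT32_C(0x00000004)   /* bit value picked for the sketch only */

/* Stand-in for the per-VCPU force-action flags word the VMCPU_FF_* macros use. */
typedef struct SKETCHFFVCPU
{
    uint32_t fLocalForcedActions;
} SKETCHFFVCPU;

/* The timer-poll paths raise the timer force-flag only when a queue has
 * expired and the flag is not already pending, so the target EMT is poked
 * exactly once per expiry. */
void sketchTimerPollCheck(SKETCHFFVCPU *pVCpuDst, uint64_t u64Now, uint64_t u64Expire)
{
    if (   u64Now >= u64Expire
        && !(pVCpuDst->fLocalForcedActions & SKETCH_FF_TIMER))
        pVCpuDst->fLocalForcedActions |= SKETCH_FF_TIMER;   /* done atomically in the real code */
}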
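
The tmCpuTickGetInternal() change in TMAllCpu.cpp turns "never lower than last seen" into "always move the last-seen value forward": a raw TSC reading that has not advanced past u64TSCLastSeen bumps that value by a small increment (64 in the patch) instead. The hunk only shows the bookkeeping, so the sketch simply returns the tracked value for both branches, with the statistics counter reduced to a plain counter:

#include <stdint.h>

/* Minimal stand-in for the per-VCPU TSC bookkeeping touched in TMAllCpu.cpp. */
typedef struct SKETCHTSC
{
    uint64_t tscLastSeen;   /* highest TSC value already handed to the guest */
    uint64_t cUnderflows;   /* stand-in for StatTSCUnderflow                 */
} SKETCHTSC;

/* Strictly monotonic guest TSC: advance the last-seen value with the raw
 * reading when possible, otherwise nudge it forward by a small increment. */
uint64_t sketchTscGetMonotonic(SKETCHTSC *pState, uint64_t uRawTsc)
{
    if (uRawTsc > pState->tscLastSeen)
        pState->tscLastSeen = uRawTsc;
    else
    {
        pState->cUnderflows++;
        pState->tscLastSeen += 64;
    }
    return pState->tscLastSeen;
}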
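
TRPMAll.cpp grows a cbInstr field next to the active vector, error code and CR2; the assert paths initialise it to UINT8_MAX and TRPMSetInstrLength() fills it in for software interrupts, #BP and #OF. The sketch below mirrors the shape of TRPMAssertXcptPF() on a cut-down trap-state struct; vector number, status values and names are stand-ins:

#include <stdint.h>

#define SKETCH_XCPT_PF          14u          /* #PF vector, cf. X86_XCPT_PF   */
#define SKETCH_NO_ACTIVE_TRAP   (~0u)        /* cf. uActiveVector == ~0U      */
#define SKETCH_INSTR_LEN_UNSET  UINT8_MAX    /* cbInstr when not yet supplied */

/* Cut-down model of the per-VCPU trap state, including the new cbInstr. */
typedef struct SKETCHTRAP
{
    uint32_t uActiveVector;
    uint32_t uActiveErrorCode;
    uint64_t uActiveCR2;
    uint8_t  cbInstr;
} SKETCHTRAP;

/* Refuse to assert a trap while one is active, then record vector, error
 * code and CR2, and mark the instruction length as unknown. */
int sketchAssertPageFault(SKETCHTRAP *pTrap, uint64_t uCR2, uint32_t uErr)
{
    if (pTrap->uActiveVector != SKETCH_NO_ACTIVE_TRAP)
        return -1;                            /* cf. VERR_TRPM_ACTIVE_TRAP */
    pTrap->uActiveVector    = SKETCH_XCPT_PF;
    pTrap->uActiveErrorCode = uErr;
    pTrap->uActiveCR2       = uCR2;
    pTrap->cbInstr          = SKETCH_INSTR_LEN_UNSET;
    return 0;                                 /* cf. VINF_SUCCESS */
}

/* Later, software-interrupt/#BP/#OF paths can record the instruction length. */
void sketchSetInstrLength(SKETCHTRAP *pTrap, uint8_t cbInstr)
{
    pTrap->cbInstr = cbInstr;
}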
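
The TRPMForwardTrap() hunks extend the raw-mode ring uncompression: a selector with RPL 1 really belongs to guest ring 0, and with VBOX_WITH_RAW_RING1 a selector with RPL 2 really belongs to guest ring 1. The same bit fiddling, pulled out into a free-standing helper invented for illustration:

#include <stdint.h>

/* Undo raw-mode ring compression on a selector before exposing it to the
 * guest: RPL 1 encodes guest ring 0, and RPL 2 (raw ring-1 mode) encodes
 * guest ring 1. */
uint16_t sketchUncompressRpl(uint16_t Sel, int fRawRing1Enabled)
{
    uint16_t rpl = Sel & 3;                  /* cf. X86_SEL_RPL */
    if (rpl == 1)
        return Sel & ~1;                     /* "& ~1": back to ring 0        */
    if (fRawRing1Enabled && rpl == 2)
        return (Sel & ~2) | 1;               /* "(& ~2) | 1": back to ring 1  */
    return Sel;                              /* ring 0 / ring 3: unchanged    */
}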
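
VMMGetCpuId()/VMMGetCpu() in VMMAll.cpp now run the host-CPU-id scan only while preemption is disabled and keep the native-thread-id scan as the always-valid fallback. A simplified model of that lookup order; types and parameters are stand-ins, and the real code queries RTMpCpuId() and RTThreadNativeSelf() itself:

#include <stdint.h>
#include <stddef.h>

/* Stand-in for the bits of VMCPU the lookup compares against. */
typedef struct SKETCHEMTVCPU
{
    uint32_t  idCpu;
    uint32_t  idHostCpu;        /* host CPU the EMT was last seen on */
    uintptr_t hNativeThread;    /* native thread id of the EMT       */
} SKETCHEMTVCPU;

/* Host-CPU-id match first (cheap, but only trustworthy with preemption
 * disabled, since the thread may otherwise migrate mid-lookup), then the
 * native-thread-id scan; NULL means the caller is not an EMT. */
SKETCHEMTVCPU *sketchLookupVCpu(SKETCHEMTVCPU *paCpus, uint32_t cCpus,
                                int fPreemptDisabled, uint32_t idHostCpu,
                                uintptr_t hNativeSelf)
{
    if (fPreemptDisabled)
        for (uint32_t i = 0; i < cCpus; i++)
            if (paCpus[i].idHostCpu == idHostCpu)
                return &paCpus[i];

    for (uint32_t i = 0; i < cCpus; i++)
        if (paCpus[i].hNativeThread == hNativeSelf)
            return &paCpus[i];

    return NULL;
}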
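
Finally, the new vmmGetBuildType() encodes build options as bits on top of a 0xbeef0000 magic so that mismatched debug/statistics builds can be detected. The same composition as a tiny free-standing function, parameterised here instead of using the preprocessor:

#include <stdint.h>

/* 0xbeef0000 plus bit 0 for DEBUG and bit 1 for VBOX_WITH_STATISTICS. */
uint32_t sketchBuildType(int fDebug, int fWithStatistics)
{
    uint32_t uRet = UINT32_C(0xbeef0000);
    if (fDebug)
        uRet |= UINT32_C(1) << 0;    /* cf. RT_BIT_32(0) */
    if (fWithStatistics)
        uRet |= UINT32_C(1) << 1;    /* cf. RT_BIT_32(1) */
    return uRet;
}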
