summaryrefslogtreecommitdiff
path: root/src/VBox/VMM/include
diff options
context:
space:
mode:
Diffstat (limited to 'src/VBox/VMM/include')
-rw-r--r--src/VBox/VMM/include/CFGMInternal.h2
-rw-r--r--src/VBox/VMM/include/CPUMInternal.h782
-rw-r--r--src/VBox/VMM/include/CPUMInternal.mac63
-rw-r--r--src/VBox/VMM/include/CSAMInternal.h2
-rw-r--r--src/VBox/VMM/include/DBGFInternal.h112
-rw-r--r--src/VBox/VMM/include/EMHandleRCTmpl.h47
-rw-r--r--src/VBox/VMM/include/EMInternal.h48
-rw-r--r--src/VBox/VMM/include/HMInternal.h1002
-rw-r--r--src/VBox/VMM/include/HMInternal.mac (renamed from src/VBox/VMM/include/HWACCMInternal.mac)19
-rw-r--r--src/VBox/VMM/include/HWACCMInternal.h906
-rw-r--r--src/VBox/VMM/include/IEMInternal.h211
-rw-r--r--src/VBox/VMM/include/IOMInline.h53
-rw-r--r--src/VBox/VMM/include/IOMInternal.h88
-rw-r--r--src/VBox/VMM/include/MMInternal.h2
-rw-r--r--src/VBox/VMM/include/PATMA.h3
-rw-r--r--src/VBox/VMM/include/PATMInternal.h214
-rw-r--r--src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h12
-rw-r--r--src/VBox/VMM/include/PDMAsyncCompletionInternal.h54
-rw-r--r--src/VBox/VMM/include/PDMBlkCacheInternal.h2
-rw-r--r--src/VBox/VMM/include/PDMInternal.h182
-rw-r--r--src/VBox/VMM/include/PDMNetShaperInternal.h46
-rw-r--r--src/VBox/VMM/include/PGMGstDefs.h2
-rw-r--r--src/VBox/VMM/include/PGMInline.h15
-rw-r--r--src/VBox/VMM/include/PGMInternal.h202
-rw-r--r--src/VBox/VMM/include/REMInternal.h4
-rw-r--r--src/VBox/VMM/include/SELMInline.h316
-rw-r--r--src/VBox/VMM/include/SELMInternal.h312
-rw-r--r--src/VBox/VMM/include/SSMInternal.h4
-rw-r--r--src/VBox/VMM/include/STAMInternal.h62
-rw-r--r--src/VBox/VMM/include/TMInternal.h2
-rw-r--r--src/VBox/VMM/include/TRPMInternal.h34
-rw-r--r--src/VBox/VMM/include/TRPMInternal.mac8
-rw-r--r--src/VBox/VMM/include/VMInternal.h9
-rw-r--r--src/VBox/VMM/include/VMMInternal.h52
-rw-r--r--src/VBox/VMM/include/VMMInternal.mac12
-rw-r--r--src/VBox/VMM/include/VMMSwitcher.h4
-rw-r--r--src/VBox/VMM/include/VMMSwitcher.mac19
-rw-r--r--src/VBox/VMM/include/internal/em.h28
-rw-r--r--src/VBox/VMM/include/internal/pgm.h9
-rw-r--r--src/VBox/VMM/include/internal/vm.h26
40 files changed, 3159 insertions, 1811 deletions
diff --git a/src/VBox/VMM/include/CFGMInternal.h b/src/VBox/VMM/include/CFGMInternal.h
index 2d33bc6d..7abced2f 100644
--- a/src/VBox/VMM/include/CFGMInternal.h
+++ b/src/VBox/VMM/include/CFGMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/CPUMInternal.h b/src/VBox/VMM/include/CPUMInternal.h
index 6d582f9e..b2fd41c4 100644
--- a/src/VBox/VMM/include/CPUMInternal.h
+++ b/src/VBox/VMM/include/CPUMInternal.h
@@ -21,6 +21,7 @@
#ifndef VBOX_FOR_DTRACE_LIB
# include <VBox/cdefs.h>
# include <VBox/types.h>
+# include <VBox/vmm/stam.h>
# include <iprt/x86.h>
#else
# pragma D depends_on library x86.d
@@ -56,29 +57,49 @@
/** Use flags (CPUM::fUseFlags).
- * (Don't forget to sync this with CPUMInternal.mac!)
+ * (Don't forget to sync this with CPUMInternal.mac !)
* @{ */
/** Used the FPU, SSE or such stuff. */
#define CPUM_USED_FPU RT_BIT(0)
/** Used the FPU, SSE or such stuff since last we were in REM.
* REM syncing is clearing this, lazy FPU is setting it. */
#define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
+/** The XMM state was manually restored. (AMD only) */
+#define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
+
/** Host OS is using SYSENTER and we must NULL the CS. */
-#define CPUM_USE_SYSENTER RT_BIT(2)
+#define CPUM_USE_SYSENTER RT_BIT(3)
/** Host OS is using SYSENTER and we must NULL the CS. */
-#define CPUM_USE_SYSCALL RT_BIT(3)
-/** Debug registers are used by host and must be disabled. */
-#define CPUM_USE_DEBUG_REGS_HOST RT_BIT(4)
-/** Enabled use of debug registers in guest context. */
-#define CPUM_USE_DEBUG_REGS RT_BIT(5)
-/** The XMM state was manually restored. (AMD only) */
-#define CPUM_MANUAL_XMM_RESTORE RT_BIT(6)
-/** Sync the FPU state on entry (32->64 switcher only). */
-#define CPUM_SYNC_FPU_STATE RT_BIT(7)
-/** Sync the debug state on entry (32->64 switcher only). */
-#define CPUM_SYNC_DEBUG_STATE RT_BIT(8)
-/** Enabled use of hypervisor debug registers in guest context. */
-#define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(9)
+#define CPUM_USE_SYSCALL RT_BIT(4)
+
+/** Debug registers are used by host and that DR7 and DR6 must be saved and
+ * disabled when switching to raw-mode. */
+#define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
+/** Records that we've saved the host DRx registers.
+ * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
+ * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
+#define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
+/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
+ * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
+#define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
+/** Used in ring-0 to indicate that we have loaded the hypervisor debug
+ * registers. */
+#define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
+/** Used in ring-0 to indicate that we have loaded the guest debug
+ * registers (DR0-3 and maybe DR6) for direct use by the guest.
+ * DR7 (and AMD-V DR6) are handled via the VMCB. */
+#define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
+
+
+/** Sync the FPU state on next entry (32->64 switcher only). */
+#define CPUM_SYNC_FPU_STATE RT_BIT(16)
+/** Sync the debug state on next entry (32->64 switcher only). */
+#define CPUM_SYNC_DEBUG_REGS_GUEST RT_BIT(17)
+/** Sync the debug state on next entry (32->64 switcher only).
+ * Almost the same as CPUM_USE_DEBUG_REGS_HYPER in the raw-mode switchers. */
+#define CPUM_SYNC_DEBUG_REGS_HYPER RT_BIT(18)
+/** Host CPU requires fxsave/fxrstor leaky bit handling. */
+#define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
/** @} */
/* Sanity check. */
@@ -90,6 +111,672 @@
/**
+ * MSR read functions.
+ */
+typedef enum CPUMMSRRDFN
+{
+ /** Invalid zero value. */
+ kCpumMsrRdFn_Invalid = 0,
+ /** Return the CPUMMSRRANGE::uValue. */
+ kCpumMsrRdFn_FixedValue,
+ /** Alias to the MSR range starting at the MSR given by
+ * CPUMMSRRANGE::uValue. Must be used in pair with
+ * kCpumMsrWrFn_MsrAlias. */
+ kCpumMsrRdFn_MsrAlias,
+ /** Write only register, GP all read attempts. */
+ kCpumMsrRdFn_WriteOnly,
+
+ kCpumMsrRdFn_Ia32P5McAddr,
+ kCpumMsrRdFn_Ia32P5McType,
+ kCpumMsrRdFn_Ia32TimestampCounter,
+ kCpumMsrRdFn_Ia32PlatformId, /**< Takes real CPU value for reference. */
+ kCpumMsrRdFn_Ia32ApicBase,
+ kCpumMsrRdFn_Ia32FeatureControl,
+ kCpumMsrRdFn_Ia32BiosSignId, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32SmmMonitorCtl,
+ kCpumMsrRdFn_Ia32PmcN,
+ kCpumMsrRdFn_Ia32MonitorFilterLineSize,
+ kCpumMsrRdFn_Ia32MPerf,
+ kCpumMsrRdFn_Ia32APerf,
+ kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */
+ kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
+ kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
+ kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
+ kCpumMsrRdFn_Ia32MtrrDefType,
+ kCpumMsrRdFn_Ia32Pat,
+ kCpumMsrRdFn_Ia32SysEnterCs,
+ kCpumMsrRdFn_Ia32SysEnterEsp,
+ kCpumMsrRdFn_Ia32SysEnterEip,
+ kCpumMsrRdFn_Ia32McgCap,
+ kCpumMsrRdFn_Ia32McgStatus,
+ kCpumMsrRdFn_Ia32McgCtl,
+ kCpumMsrRdFn_Ia32DebugCtl,
+ kCpumMsrRdFn_Ia32SmrrPhysBase,
+ kCpumMsrRdFn_Ia32SmrrPhysMask,
+ kCpumMsrRdFn_Ia32PlatformDcaCap,
+ kCpumMsrRdFn_Ia32CpuDcaCap,
+ kCpumMsrRdFn_Ia32Dca0Cap,
+ kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
+ kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
+ kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */
+ kCpumMsrRdFn_Ia32FixedCtrCtrl,
+ kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */
+ kCpumMsrRdFn_Ia32PerfGlobalCtrl,
+ kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
+ kCpumMsrRdFn_Ia32PebsEnable,
+ kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */
+ kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
+ kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */
+ kCpumMsrRdFn_Ia32DsArea,
+ kCpumMsrRdFn_Ia32TscDeadline,
+ kCpumMsrRdFn_Ia32X2ApicN,
+ kCpumMsrRdFn_Ia32DebugInterface,
+ kCpumMsrRdFn_Ia32VmxBase, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */
+ kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */
+
+ kCpumMsrRdFn_Amd64Efer,
+ kCpumMsrRdFn_Amd64SyscallTarget,
+ kCpumMsrRdFn_Amd64LongSyscallTarget,
+ kCpumMsrRdFn_Amd64CompSyscallTarget,
+ kCpumMsrRdFn_Amd64SyscallFlagMask,
+ kCpumMsrRdFn_Amd64FsBase,
+ kCpumMsrRdFn_Amd64GsBase,
+ kCpumMsrRdFn_Amd64KernelGsBase,
+ kCpumMsrRdFn_Amd64TscAux,
+
+ kCpumMsrRdFn_IntelEblCrPowerOn,
+ kCpumMsrRdFn_IntelI7CoreThreadCount,
+ kCpumMsrRdFn_IntelP4EbcHardPowerOn,
+ kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
+ kCpumMsrRdFn_IntelP4EbcFrequencyId,
+ kCpumMsrRdFn_IntelP6FsbFrequency, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelPlatformInfo,
+ kCpumMsrRdFn_IntelFlexRatio, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelPkgCStConfigControl,
+ kCpumMsrRdFn_IntelPmgIoCaptureBase,
+ kCpumMsrRdFn_IntelLastBranchFromToN,
+ kCpumMsrRdFn_IntelLastBranchFromN,
+ kCpumMsrRdFn_IntelLastBranchToN,
+ kCpumMsrRdFn_IntelLastBranchTos,
+ kCpumMsrRdFn_IntelBblCrCtl,
+ kCpumMsrRdFn_IntelBblCrCtl3,
+ kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */
+ kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
+ kCpumMsrRdFn_IntelI7MiscPwrMgmt,
+ kCpumMsrRdFn_IntelP6CrN,
+ kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
+ kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
+ kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
+ kCpumMsrRdFn_IntelI7SandyAesNiCtl,
+ kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */
+ kCpumMsrRdFn_IntelI7LbrSelect,
+ kCpumMsrRdFn_IntelI7SandyErrorControl,
+ kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
+ kCpumMsrRdFn_IntelI7PowerCtl,
+ kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
+ kCpumMsrRdFn_IntelI7PebsLdLat,
+ kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */
+ kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */
+ kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
+ kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
+ kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
+ kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
+ kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
+ kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
+ kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
+ kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
+ kCpumMsrRdFn_IntelI7UncCBoxConfig,
+ kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
+ kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
+ kCpumMsrRdFn_IntelCore2EmttmCrTablesN, /**< Range value returned. */
+ kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
+ kCpumMsrRdFn_IntelCore1ExtConfig,
+ kCpumMsrRdFn_IntelCore1DtsCalControl,
+ kCpumMsrRdFn_IntelCore2PeciControl,
+
+ kCpumMsrRdFn_P6LastBranchFromIp,
+ kCpumMsrRdFn_P6LastBranchToIp,
+ kCpumMsrRdFn_P6LastIntFromIp,
+ kCpumMsrRdFn_P6LastIntToIp,
+
+ kCpumMsrRdFn_AmdFam15hTscRate,
+ kCpumMsrRdFn_AmdFam15hLwpCfg,
+ kCpumMsrRdFn_AmdFam15hLwpCbAddr,
+ kCpumMsrRdFn_AmdFam10hMc4MiscN,
+ kCpumMsrRdFn_AmdK8PerfCtlN,
+ kCpumMsrRdFn_AmdK8PerfCtrN,
+ kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */
+ kCpumMsrRdFn_AmdK8HwCr,
+ kCpumMsrRdFn_AmdK8IorrBaseN,
+ kCpumMsrRdFn_AmdK8IorrMaskN,
+ kCpumMsrRdFn_AmdK8TopOfMemN,
+ kCpumMsrRdFn_AmdK8NbCfg1,
+ kCpumMsrRdFn_AmdK8McXcptRedir,
+ kCpumMsrRdFn_AmdK8CpuNameN,
+ kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */
+ kCpumMsrRdFn_AmdK8SwThermalCtrl,
+ kCpumMsrRdFn_AmdK8FidVidControl, /**< Range value returned. */
+ kCpumMsrRdFn_AmdK8FidVidStatus, /**< Range value returned. */
+ kCpumMsrRdFn_AmdK8McCtlMaskN,
+ kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
+ kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
+ kCpumMsrRdFn_AmdK8IntPendingMessage,
+ kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
+ kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
+ kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
+ kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */
+ kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */
+ kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */
+ kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't a register index! */
+ kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */
+ kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */
+ kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
+ kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
+ kCpumMsrRdFn_AmdK8SmmBase,
+ kCpumMsrRdFn_AmdK8SmmAddr,
+ kCpumMsrRdFn_AmdK8SmmMask,
+ kCpumMsrRdFn_AmdK8VmCr,
+ kCpumMsrRdFn_AmdK8IgnNe,
+ kCpumMsrRdFn_AmdK8SmmCtl,
+ kCpumMsrRdFn_AmdK8VmHSavePa,
+ kCpumMsrRdFn_AmdFam10hVmLockKey,
+ kCpumMsrRdFn_AmdFam10hSmmLockKey,
+ kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
+ kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
+ kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
+ kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
+ kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
+ kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
+ kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
+ kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */
+ kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */
+ kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
+ kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
+ kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
+ kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
+ kCpumMsrRdFn_AmdK8PatchLevel, /**< Returns range value. */
+ kCpumMsrRdFn_AmdK7DebugStatusMaybe,
+ kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
+ kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
+ kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
+ kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
+ kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
+ kCpumMsrRdFn_AmdK7NodeId,
+ kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
+ kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
+ kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
+ kCpumMsrRdFn_AmdK7LoadStoreCfg,
+ kCpumMsrRdFn_AmdK7InstrCacheCfg,
+ kCpumMsrRdFn_AmdK7DataCacheCfg,
+ kCpumMsrRdFn_AmdK7BusUnitCfg,
+ kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
+ kCpumMsrRdFn_AmdFam15hFpuCfg,
+ kCpumMsrRdFn_AmdFam15hDecoderCfg,
+ kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
+ kCpumMsrRdFn_AmdFam15hCombUnitCfg,
+ kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
+ kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
+ kCpumMsrRdFn_AmdFam15hExecUnitCfg,
+ kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
+ kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
+ kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
+ kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
+ kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
+ kCpumMsrRdFn_AmdFam10hIbsOpRip,
+ kCpumMsrRdFn_AmdFam10hIbsOpData,
+ kCpumMsrRdFn_AmdFam10hIbsOpData2,
+ kCpumMsrRdFn_AmdFam10hIbsOpData3,
+ kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
+ kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
+ kCpumMsrRdFn_AmdFam10hIbsCtl,
+ kCpumMsrRdFn_AmdFam14hIbsBrTarget,
+
+ /** End of valid MSR read function indexes. */
+ kCpumMsrRdFn_End
+} CPUMMSRRDFN;
+
+/**
+ * MSR write functions.
+ */
+typedef enum CPUMMSRWRFN
+{
+ /** Invalid zero value. */
+ kCpumMsrWrFn_Invalid = 0,
+ /** Writes are ignored, the fWrGpMask is observed though. */
+ kCpumMsrWrFn_IgnoreWrite,
+ /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
+ kCpumMsrWrFn_ReadOnly,
+ /** Alias to the MSR range starting at the MSR given by
+ * CPUMMSRRANGE::uValue. Must be used in pair with
+ * kCpumMsrRdFn_MsrAlias. */
+ kCpumMsrWrFn_MsrAlias,
+
+ kCpumMsrWrFn_Ia32P5McAddr,
+ kCpumMsrWrFn_Ia32P5McType,
+ kCpumMsrWrFn_Ia32TimestampCounter,
+ kCpumMsrWrFn_Ia32ApicBase,
+ kCpumMsrWrFn_Ia32FeatureControl,
+ kCpumMsrWrFn_Ia32BiosSignId,
+ kCpumMsrWrFn_Ia32BiosUpdateTrigger,
+ kCpumMsrWrFn_Ia32SmmMonitorCtl,
+ kCpumMsrWrFn_Ia32PmcN,
+ kCpumMsrWrFn_Ia32MonitorFilterLineSize,
+ kCpumMsrWrFn_Ia32MPerf,
+ kCpumMsrWrFn_Ia32APerf,
+ kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
+ kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
+ kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
+ kCpumMsrWrFn_Ia32MtrrDefType,
+ kCpumMsrWrFn_Ia32Pat,
+ kCpumMsrWrFn_Ia32SysEnterCs,
+ kCpumMsrWrFn_Ia32SysEnterEsp,
+ kCpumMsrWrFn_Ia32SysEnterEip,
+ kCpumMsrWrFn_Ia32McgStatus,
+ kCpumMsrWrFn_Ia32McgCtl,
+ kCpumMsrWrFn_Ia32DebugCtl,
+ kCpumMsrWrFn_Ia32SmrrPhysBase,
+ kCpumMsrWrFn_Ia32SmrrPhysMask,
+ kCpumMsrWrFn_Ia32PlatformDcaCap,
+ kCpumMsrWrFn_Ia32Dca0Cap,
+ kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
+ kCpumMsrWrFn_Ia32PerfStatus,
+ kCpumMsrWrFn_Ia32PerfCtl,
+ kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
+ kCpumMsrWrFn_Ia32PerfCapabilities,
+ kCpumMsrWrFn_Ia32FixedCtrCtrl,
+ kCpumMsrWrFn_Ia32PerfGlobalStatus,
+ kCpumMsrWrFn_Ia32PerfGlobalCtrl,
+ kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
+ kCpumMsrWrFn_Ia32PebsEnable,
+ kCpumMsrWrFn_Ia32ClockModulation,
+ kCpumMsrWrFn_Ia32ThermInterrupt,
+ kCpumMsrWrFn_Ia32ThermStatus,
+ kCpumMsrWrFn_Ia32Therm2Ctl,
+ kCpumMsrWrFn_Ia32MiscEnable,
+ kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
+ kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. */
+ kCpumMsrWrFn_Ia32DsArea,
+ kCpumMsrWrFn_Ia32TscDeadline,
+ kCpumMsrWrFn_Ia32X2ApicN,
+ kCpumMsrWrFn_Ia32DebugInterface,
+
+ kCpumMsrWrFn_Amd64Efer,
+ kCpumMsrWrFn_Amd64SyscallTarget,
+ kCpumMsrWrFn_Amd64LongSyscallTarget,
+ kCpumMsrWrFn_Amd64CompSyscallTarget,
+ kCpumMsrWrFn_Amd64SyscallFlagMask,
+ kCpumMsrWrFn_Amd64FsBase,
+ kCpumMsrWrFn_Amd64GsBase,
+ kCpumMsrWrFn_Amd64KernelGsBase,
+ kCpumMsrWrFn_Amd64TscAux,
+ kCpumMsrWrFn_IntelEblCrPowerOn,
+ kCpumMsrWrFn_IntelP4EbcHardPowerOn,
+ kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
+ kCpumMsrWrFn_IntelP4EbcFrequencyId,
+ kCpumMsrWrFn_IntelFlexRatio,
+ kCpumMsrWrFn_IntelPkgCStConfigControl,
+ kCpumMsrWrFn_IntelPmgIoCaptureBase,
+ kCpumMsrWrFn_IntelLastBranchFromToN,
+ kCpumMsrWrFn_IntelLastBranchFromN,
+ kCpumMsrWrFn_IntelLastBranchToN,
+ kCpumMsrWrFn_IntelLastBranchTos,
+ kCpumMsrWrFn_IntelBblCrCtl,
+ kCpumMsrWrFn_IntelBblCrCtl3,
+ kCpumMsrWrFn_IntelI7TemperatureTarget,
+ kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
+ kCpumMsrWrFn_IntelI7MiscPwrMgmt,
+ kCpumMsrWrFn_IntelP6CrN,
+ kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
+ kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
+ kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
+ kCpumMsrWrFn_IntelI7SandyAesNiCtl,
+ kCpumMsrWrFn_IntelI7TurboRatioLimit,
+ kCpumMsrWrFn_IntelI7LbrSelect,
+ kCpumMsrWrFn_IntelI7SandyErrorControl,
+ kCpumMsrWrFn_IntelI7PowerCtl,
+ kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
+ kCpumMsrWrFn_IntelI7PebsLdLat,
+ kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
+ kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
+ kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
+ kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
+ kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
+ kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
+ kCpumMsrWrFn_IntelI7RaplPp0Policy,
+ kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
+ kCpumMsrWrFn_IntelI7RaplPp1Policy,
+ kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
+ kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
+ kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
+ kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
+ kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
+ kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
+ kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
+ kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
+ kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
+ kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
+ kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
+ kCpumMsrWrFn_IntelCore1ExtConfig,
+ kCpumMsrWrFn_IntelCore1DtsCalControl,
+ kCpumMsrWrFn_IntelCore2PeciControl,
+
+ kCpumMsrWrFn_P6LastIntFromIp,
+ kCpumMsrWrFn_P6LastIntToIp,
+
+ kCpumMsrWrFn_AmdFam15hTscRate,
+ kCpumMsrWrFn_AmdFam15hLwpCfg,
+ kCpumMsrWrFn_AmdFam15hLwpCbAddr,
+ kCpumMsrWrFn_AmdFam10hMc4MiscN,
+ kCpumMsrWrFn_AmdK8PerfCtlN,
+ kCpumMsrWrFn_AmdK8PerfCtrN,
+ kCpumMsrWrFn_AmdK8SysCfg,
+ kCpumMsrWrFn_AmdK8HwCr,
+ kCpumMsrWrFn_AmdK8IorrBaseN,
+ kCpumMsrWrFn_AmdK8IorrMaskN,
+ kCpumMsrWrFn_AmdK8TopOfMemN,
+ kCpumMsrWrFn_AmdK8NbCfg1,
+ kCpumMsrWrFn_AmdK8McXcptRedir,
+ kCpumMsrWrFn_AmdK8CpuNameN,
+ kCpumMsrWrFn_AmdK8HwThermalCtrl,
+ kCpumMsrWrFn_AmdK8SwThermalCtrl,
+ kCpumMsrWrFn_AmdK8FidVidControl,
+ kCpumMsrWrFn_AmdK8McCtlMaskN,
+ kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
+ kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
+ kCpumMsrWrFn_AmdK8IntPendingMessage,
+ kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
+ kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
+ kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
+ kCpumMsrWrFn_AmdFam10hPStateControl,
+ kCpumMsrWrFn_AmdFam10hPStateStatus,
+ kCpumMsrWrFn_AmdFam10hPStateN,
+ kCpumMsrWrFn_AmdFam10hCofVidControl,
+ kCpumMsrWrFn_AmdFam10hCofVidStatus,
+ kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
+ kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
+ kCpumMsrWrFn_AmdK8SmmBase,
+ kCpumMsrWrFn_AmdK8SmmAddr,
+ kCpumMsrWrFn_AmdK8SmmMask,
+ kCpumMsrWrFn_AmdK8VmCr,
+ kCpumMsrWrFn_AmdK8IgnNe,
+ kCpumMsrWrFn_AmdK8SmmCtl,
+ kCpumMsrWrFn_AmdK8VmHSavePa,
+ kCpumMsrWrFn_AmdFam10hVmLockKey,
+ kCpumMsrWrFn_AmdFam10hSmmLockKey,
+ kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
+ kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
+ kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
+ kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
+ kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
+ kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
+ kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
+ kCpumMsrWrFn_AmdK7MicrocodeCtl,
+ kCpumMsrWrFn_AmdK7ClusterIdMaybe,
+ kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
+ kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
+ kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
+ kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
+ kCpumMsrWrFn_AmdK8PatchLoader,
+ kCpumMsrWrFn_AmdK7DebugStatusMaybe,
+ kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
+ kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
+ kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
+ kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
+ kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
+ kCpumMsrWrFn_AmdK7NodeId,
+ kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
+ kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
+ kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
+ kCpumMsrWrFn_AmdK7LoadStoreCfg,
+ kCpumMsrWrFn_AmdK7InstrCacheCfg,
+ kCpumMsrWrFn_AmdK7DataCacheCfg,
+ kCpumMsrWrFn_AmdK7BusUnitCfg,
+ kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
+ kCpumMsrWrFn_AmdFam15hFpuCfg,
+ kCpumMsrWrFn_AmdFam15hDecoderCfg,
+ kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
+ kCpumMsrWrFn_AmdFam15hCombUnitCfg,
+ kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
+ kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
+ kCpumMsrWrFn_AmdFam15hExecUnitCfg,
+ kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
+ kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
+ kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
+ kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
+ kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
+ kCpumMsrWrFn_AmdFam10hIbsOpRip,
+ kCpumMsrWrFn_AmdFam10hIbsOpData,
+ kCpumMsrWrFn_AmdFam10hIbsOpData2,
+ kCpumMsrWrFn_AmdFam10hIbsOpData3,
+ kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
+ kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
+ kCpumMsrWrFn_AmdFam10hIbsCtl,
+ kCpumMsrWrFn_AmdFam14hIbsBrTarget,
+
+ /** End of valid MSR write function indexes. */
+ kCpumMsrWrFn_End
+} CPUMMSRWRFN;
+
+/**
+ * MSR range.
+ */
+typedef struct CPUMMSRRANGE
+{
+ /** The first MSR. [0] */
+ uint32_t uFirst;
+ /** The last MSR. [4] */
+ uint32_t uLast;
+ /** The read function (CPUMMSRRDFN). [8] */
+ uint16_t enmRdFn;
+ /** The write function (CPUMMSRWRFN). [10] */
+ uint16_t enmWrFn;
+ /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
+ * UINT16_MAX if not used by the read and write functions. [12] */
+ uint16_t offCpumCpu;
+ /** Reserved for future hacks. [14] */
+ uint16_t fReserved;
+ /** The init/read value. [16]
+ * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
+ * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
+ * offset into CPUM. */
+ uint64_t uValue;
+ /** The bits to ignore when writing. [24] */
+ uint64_t fWrIgnMask;
+ /** The bits that will cause a GP(0) when writing. [32]
+ * This is always checked prior to calling the write function. Using
+ * UINT64_MAX effectively marks the MSR as read-only. */
+ uint64_t fWrGpMask;
+ /** The register name, if applicable. [40] */
+ char szName[56];
+
+#ifdef VBOX_WITH_STATISTICS
+ /** The number of reads. */
+ STAMCOUNTER cReads;
+ /** The number of writes. */
+ STAMCOUNTER cWrites;
+ /** The number of times ignored bits were written. */
+ STAMCOUNTER cIgnoredBits;
+ /** The number of GPs generated. */
+ STAMCOUNTER cGps;
+#endif
+} CPUMMSRRANGE;
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileSize(CPUMMSRRANGE, 128);
+#else
+AssertCompileSize(CPUMMSRRANGE, 96);
+#endif
+/** Pointer to an MSR range. */
+typedef CPUMMSRRANGE *PCPUMMSRRANGE;
+/** Pointer to a const MSR range. */
+typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
+
+
+
+
+/**
+ * CPU features and quirks.
+ * This is mostly exploded CPUID info.
+ */
+typedef struct CPUMFEATURES
+{
+ /** The CPU vendor (CPUMCPUVENDOR). */
+ uint8_t enmCpuVendor;
+ /** The CPU family. */
+ uint8_t uFamily;
+ /** The CPU model. */
+ uint8_t uModel;
+ /** The CPU stepping. */
+ uint8_t uStepping;
+ /** The microarchitecture. */
+ CPUMMICROARCH enmMicroarch;
+ /** The maximum physical address width of the CPU. */
+ uint8_t cMaxPhysAddrWidth;
+ /** Alignment padding. */
+ uint8_t abPadding[3];
+
+ /** Supports MSRs. */
+ uint32_t fMsr : 1;
+ /** Supports the page size extension (4/2 MB pages). */
+ uint32_t fPse : 1;
+ /** Supports 36-bit page size extension (4 MB pages can map memory above
+ * 4GB). */
+ uint32_t fPse36 : 1;
+ /** Supports physical address extension (PAE). */
+ uint32_t fPae : 1;
+ /** Page attribute table (PAT) support (page level cache control). */
+ uint32_t fPat : 1;
+ /** Supports the FXSAVE and FXRSTOR instructions. */
+ uint32_t fFxSaveRstor : 1;
+ /** Intel SYSENTER/SYSEXIT support */
+ uint32_t fSysEnter : 1;
+ /** First generation APIC. */
+ uint32_t fApic : 1;
+ /** Second generation APIC. */
+ uint32_t fX2Apic : 1;
+ /** Hypervisor present. */
+ uint32_t fHypervisorPresent : 1;
+ /** MWAIT & MONITOR instructions supported. */
+ uint32_t fMonitorMWait : 1;
+
+ /** AMD64: Supports long mode. */
+ uint32_t fLongMode : 1;
+ /** AMD64: SYSCALL/SYSRET support. */
+ uint32_t fSysCall : 1;
+ /** AMD64: No-execute page table bit. */
+ uint32_t fNoExecute : 1;
+ /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
+ uint32_t fLahfSahf : 1;
+ /** AMD64: Supports RDTSCP. */
+ uint32_t fRdTscP : 1;
+
+ /** Indicates that FPU instruction and data pointers may leak.
+ * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
+ * is only saved and restored if an exception is pending. */
+ uint32_t fLeakyFxSR : 1;
+
+ /** Alignment padding. */
+ uint32_t fPadding : 9;
+
+ uint64_t auPadding[2];
+} CPUMFEATURES;
+AssertCompileSize(CPUMFEATURES, 32);
+/** Pointer to a CPU feature structure. */
+typedef CPUMFEATURES *PCPUMFEATURES;
+/** Pointer to a const CPU feature structure. */
+typedef CPUMFEATURES const *PCCPUMFEATURES;
+
+
+/**
+ * CPU info
+ */
+typedef struct CPUMINFO
+{
+ /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
+ uint32_t cMsrRanges;
+ /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
+ * instruction. Older hardware has been observed to ignore higher bits. */
+ uint32_t fMsrMask;
+
+ /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
+ uint32_t cCpuIdLeaves;
+ /** The index of the first extended CPUID leaf in the array.
+ * Set to cCpuIdLeaves if none present. */
+ uint32_t iFirstExtCpuIdLeaf;
+ /** Alignment padding. */
+ uint32_t uPadding;
+ /** How to handle unknown CPUID leaves. */
+ CPUMUKNOWNCPUID enmUnknownCpuIdMethod;
+ /** For use with CPUMUKNOWNCPUID_DEFAULTS. */
+ CPUMCPUID DefCpuId;
+
+ /** Scalable bus frequency used for reporting other frequencies. */
+ uint64_t uScalableBusFreq;
+
+ /** Pointer to the MSR ranges (ring-0 pointer). */
+ R0PTRTYPE(PCPUMMSRRANGE) paMsrRangesR0;
+ /** Pointer to the CPUID leaves (ring-0 pointer). */
+ R0PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR0;
+
+ /** Pointer to the MSR ranges (ring-3 pointer). */
+ R3PTRTYPE(PCPUMMSRRANGE) paMsrRangesR3;
+ /** Pointer to the CPUID leaves (ring-3 pointer). */
+ R3PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR3;
+
+ /** Pointer to the MSR ranges (raw-mode context pointer). */
+ RCPTRTYPE(PCPUMMSRRANGE) paMsrRangesRC;
+ /** Pointer to the CPUID leaves (raw-mode context pointer). */
+ RCPTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesRC;
+} CPUMINFO;
+/** Pointer to a CPU info structure. */
+typedef CPUMINFO *PCPUMINFO;
+/** Pointer to a const CPU info structure. */
+typedef CPUMINFO const *CPCPUMINFO;
+
+
+/**
* The saved host CPU state.
*
* @remark The special VBOX_WITH_HYBRID_32BIT_KERNEL checks here are for the 10.4.x series
@@ -289,26 +976,19 @@ typedef struct CPUM
uint32_t ecx;
} CPUFeaturesExt;
- /** Host CPU manufacturer. */
- CPUMCPUVENDOR enmHostCpuVendor;
- /** Guest CPU manufacturer. */
- CPUMCPUVENDOR enmGuestCpuVendor;
-
/** CR4 mask */
struct
{
- uint32_t AndMask;
+ uint32_t AndMask; /**< @todo Move these to the per-CPU structure and fix the switchers. Saves a register! */
uint32_t OrMask;
} CR4;
- /** Synthetic CPU type? */
- bool fSyntheticCpu;
/** The (more) portable CPUID level. */
uint8_t u8PortableCpuIdLevel;
/** Indicates that a state restore is pending.
* This is used to verify load order dependencies (PGM). */
bool fPendingRestore;
- uint8_t abPadding[HC_ARCH_BITS == 64 ? 5 : 1];
+ uint8_t abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
/** The standard set of CpuId leaves. */
CPUMCPUID aGuestCpuIdStd[6];
@@ -325,11 +1005,23 @@ typedef struct CPUM
uint8_t abPadding2[4];
#endif
-#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
- RTHCPTR pvApicBase;
- uint32_t fApicDisVectors;
- uint8_t abPadding3[4];
-#endif
+ /** Guest CPU info. */
+ CPUMINFO GuestInfo;
+ /** Guest CPU feature information. */
+ CPUMFEATURES GuestFeatures;
+ /** Host CPU feature information. */
+ CPUMFEATURES HostFeatures;
+
+ /** @name MSR statistics.
+ * @{ */
+ STAMCOUNTER cMsrWrites;
+ STAMCOUNTER cMsrWritesToIgnoredBits;
+ STAMCOUNTER cMsrWritesRaiseGp;
+ STAMCOUNTER cMsrWritesUnknown;
+ STAMCOUNTER cMsrReads;
+ STAMCOUNTER cMsrReadsRaiseGp;
+ STAMCOUNTER cMsrReadsUnknown;
+ /** @} */
} CPUM;
/** Pointer to the CPUM instance data residing in the shared VM structure. */
typedef CPUM *PCPUM;
@@ -387,13 +1079,27 @@ typedef struct CPUMCPU
* 32-64 switcher. */
uint32_t u32RetCode;
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ /** The address of the APIC mapping, NULL if no APIC.
+ * Call CPUMR0SetLApic to update this before doing a world switch. */
+ RTHCPTR pvApicBase;
+ /** Used by the world switcher code to store which vectors needs restoring on
+ * the way back. */
+ uint32_t fApicDisVectors;
+ /** Set if the CPU has the X2APIC mode enabled.
+ * Call CPUMR0SetLApic to update this before doing a world switch. */
+ bool fX2Apic;
+#else
+ uint8_t abPadding3[(HC_ARCH_BITS == 64 ? 8 : 4) + 4 + 1];
+#endif
+
/** Have we entered raw-mode? */
bool fRawEntered;
/** Have we entered the recompiler? */
bool fRemEntered;
/** Align the structure on a 64-byte boundary. */
- uint8_t abPadding2[64 - 16 - 2];
+ uint8_t abPadding2[64 - 16 - (HC_ARCH_BITS == 64 ? 8 : 4) - 4 - 1 - 2];
} CPUMCPU;
/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
typedef CPUMCPU *PCPUMCPU;
@@ -401,11 +1107,27 @@ typedef CPUMCPU *PCPUMCPU;
#ifndef VBOX_FOR_DTRACE_LIB
RT_C_DECLS_BEGIN
+PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf);
+
#ifdef IN_RING3
int cpumR3DbgInit(PVM pVM);
+PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
+bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
+ PCPUMCPUID pLeagcy);
+int cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf);
+void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast);
+int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
+int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
+int cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
+int cpumR3MsrApplyFudge(PVM pVM);
+int cpumR3MsrRegStats(PVM pVM);
+int cpumR3MsrStrictInitChecks(void);
+PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
#endif
+#ifdef IN_RC
DECLASM(int) cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
+#endif
#ifdef IN_RING0
DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
diff --git a/src/VBox/VMM/include/CPUMInternal.mac b/src/VBox/VMM/include/CPUMInternal.mac
index 6206a3ae..5e1f66b2 100644
--- a/src/VBox/VMM/include/CPUMInternal.mac
+++ b/src/VBox/VMM/include/CPUMInternal.mac
@@ -4,7 +4,7 @@
;
;
-; Copyright (C) 2006-2010 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -17,14 +17,21 @@
%include "VBox/asmdefs.mac"
+
%define CPUM_USED_FPU RT_BIT(0)
%define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
-%define CPUM_USE_SYSENTER RT_BIT(2)
-%define CPUM_USE_SYSCALL RT_BIT(3)
-%define CPUM_USE_DEBUG_REGS_HOST RT_BIT(4)
-%define CPUM_USE_DEBUG_REGS RT_BIT(5)
-%define CPUM_SYNC_FPU_STATE RT_BIT(7)
-%define CPUM_SYNC_DEBUG_STATE RT_BIT(8)
+%define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
+%define CPUM_USE_SYSENTER RT_BIT(3)
+%define CPUM_USE_SYSCALL RT_BIT(4)
+%define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
+%define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
+%define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
+%define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
+%define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
+%define CPUM_SYNC_FPU_STATE RT_BIT(16)
+%define CPUM_SYNC_DEBUG_REGS_GUEST RT_BIT(17)
+%define CPUM_SYNC_DEBUG_REGS_HYPER RT_BIT(18)
+%define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
%define CPUM_HANDLER_DS 1
%define CPUM_HANDLER_ES 2
@@ -59,20 +66,16 @@ struc CPUM
.CPUFeaturesExt.edx resd 1
.CPUFeaturesExt.ecx resd 1
- .enmHostCpuVendor resd 1
- .enmGuestCpuVendor resd 1
-
; CR4 masks
.CR4.AndMask resd 1
.CR4.OrMask resd 1
; entered rawmode?
- .fSyntheticCpu resb 1
.u8PortableCpuIdLevel resb 1
.fPendingRestore resb 1
%if RTHCPTR_CB == 8
- .abPadding resb 5
+ .abPadding resb 6
%else
- .abPadding resb 1
+ .abPadding resb 2
%endif
; CPUID leafs
@@ -86,11 +89,17 @@ struc CPUM
.abPadding2 resb 4
%endif
-%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
- .pvApicBase RTR0PTR_RES 1
- .fApicDisVectors resd 1
- .abPadding3 resb 4
-%endif
+ .GuestInfo resb RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12
+ .GuestFeatures resb 32
+ .HostFeatures resb 32
+
+ .cMsrWrites resq 1
+ .cMsrWritesToIgnoredBits resq 1
+ .cMsrWritesRaiseGp resq 1
+ .cMsrWritesUnknown resq 1
+ .cMsrReads resq 1
+ .cMsrReadsRaiseGp resq 1
+ .cMsrReadsUnknown resq 1
endstruc
struc CPUMCPU
@@ -195,6 +204,7 @@ struc CPUMCPU
.Hyper.msrCSTAR resb 8
.Hyper.msrSFMASK resb 8
.Hyper.msrKERNELGSBASE resb 8
+ .Hyper.msrApicBase resb 8
;
; Host context state
@@ -415,6 +425,7 @@ struc CPUMCPU
.Guest.msrCSTAR resb 8
.Guest.msrSFMASK resb 8
.Guest.msrKERNELGSBASE resb 8
+ .Guest.msrApicBase resb 8
alignb 64
@@ -427,9 +438,19 @@ struc CPUMCPU
.fChanged resd 1
.offCPUM resd 1
.u32RetCode resd 1
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ .pvApicBase RTR0PTR_RES 1
+ .fApicDisVectors resd 1
+ .fX2Apic resb 1
+%else
+ .abPadding3 resb (RTR0PTR_CB + 4 + 1)
+%endif
+
.fRawEntered resb 1
.fRemEntered resb 1
- .abPadding2 resb (64 - 16 - 2)
+
+ .abPadding2 resb (64 - 16 - RTR0PTR_CB - 4 - 1 - 2)
endstruc
@@ -442,7 +463,7 @@ endstruc
;;
; Converts the CPUM pointer to CPUMCPU
-; @param %1 register name (PVM)
+; @param %1 register name (CPUM)
; @param %2 register name (CPUMCPU offset)
%macro CPUMCPU_FROM_CPUM_WITH_OFFSET 2
add %1, %2
@@ -457,7 +478,7 @@ endstruc
;;
; Converts the CPUMCPU pointer to CPUM
-; @param %1 register name (PVM)
+; @param %1 register name (CPUM)
; @param %2 register name (CPUMCPU offset)
%macro CPUM_FROM_CPUMCPU_WITH_OFFSET 2
sub %1, %2
diff --git a/src/VBox/VMM/include/CSAMInternal.h b/src/VBox/VMM/include/CSAMInternal.h
index 66f89344..7e6f4fb7 100644
--- a/src/VBox/VMM/include/CSAMInternal.h
+++ b/src/VBox/VMM/include/CSAMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/DBGFInternal.h b/src/VBox/VMM/include/DBGFInternal.h
index 55c147de..8e01d2d3 100644
--- a/src/VBox/VMM/include/DBGFInternal.h
+++ b/src/VBox/VMM/include/DBGFInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -24,6 +24,7 @@
#include <iprt/critsect.h>
#include <iprt/string.h>
#include <iprt/avl.h>
+#include <iprt/dbg.h>
#include <VBox/vmm/dbgf.h>
@@ -232,22 +233,6 @@ typedef struct DBGF
* Not all commands take data. */
DBGFCMDDATA VMMCmdData;
- /** List of registered info handlers. */
- R3PTRTYPE(PDBGFINFO) pInfoFirst;
- /** Critical section protecting the above list. */
- RTCRITSECT InfoCritSect;
-
- /** Range tree containing the loaded symbols of the a VM.
- * This tree will never have blind spots. */
- R3PTRTYPE(AVLRGCPTRTREE) SymbolTree;
- /** Symbol name space. */
- R3PTRTYPE(PRTSTRSPACE) pSymbolSpace;
- /** Indicates whether DBGFSym.cpp is initialized or not.
- * This part is initialized in a lazy manner for performance reasons. */
- bool fSymInited;
- /** Alignment padding. */
- uint32_t uAlignment0;
-
/** The number of hardware breakpoints. */
uint32_t cHwBreakpoints;
/** The number of active breakpoints. */
@@ -258,7 +243,45 @@ typedef struct DBGF
/** Array of int 3 and REM breakpoints. (4..)
* @remark This is currently a fixed size array for reasons of simplicity. */
DBGFBP aBreakpoints[32];
+} DBGF;
+/** Pointer to DBGF Data. */
+typedef DBGF *PDBGF;
+
+
+/** Converts a DBGFCPU pointer into a VM pointer. */
+#define DBGFCPU_2_VM(pDbgfCpu) ((PVM)((uint8_t *)(pDbgfCpu) + (pDbgfCpu)->offVM))
+
+/**
+ * The per CPU data for DBGF.
+ */
+typedef struct DBGFCPU
+{
+ /** The offset into the VM structure.
+ * @see DBGFCPU_2_VM(). */
+ uint32_t offVM;
+
+ /** Current active breakpoint (id).
+ * This is ~0U if not active. It is set when a execution engine
+ * encounters a breakpoint and returns VINF_EM_DBG_BREAKPOINT. This is
+ * currently not used for REM breakpoints because of the lazy coupling
+ * between VBox and REM. */
+ uint32_t iActiveBp;
+ /** Set if we're singlestepping in raw mode.
+ * This is checked and cleared in the \#DB handler. */
+ bool fSingleSteppingRaw;
+
+ /** Padding the structure to 16 bytes. */
+ bool afReserved[7];
+} DBGFCPU;
+/** Pointer to DBGFCPU data. */
+typedef DBGFCPU *PDBGFCPU;
+
+/**
+ * The DBGF data kept in the UVM.
+ */
+typedef struct DBGFUSERPERVM
+{
/** The address space database lock. */
RTSEMRW hAsDbLock;
/** The address space handle database. (Protected by hAsDbLock.) */
@@ -273,6 +296,8 @@ typedef struct DBGF
bool volatile afAsAliasPopuplated[DBGF_AS_COUNT];
/** Alignment padding. */
bool afAlignment1[2];
+ /** Debug configuration. */
+ R3PTRTYPE(RTDBGCFG) hDbgCfg;
/** The register database lock. */
RTSEMRW hRegDbLock;
@@ -292,56 +317,35 @@ typedef struct DBGF
R3PTRTYPE(PDBGFOS) pCurOS;
/** The head of the Guest OS digger instances. */
R3PTRTYPE(PDBGFOS) pOSHead;
-} DBGF;
-/** Pointer to DBGF Data. */
-typedef DBGF *PDBGF;
+ /** List of registered info handlers. */
+ R3PTRTYPE(PDBGFINFO) pInfoFirst;
+ /** Critical section protecting the above list. */
+ RTCRITSECT InfoCritSect;
-/** Converts a DBGFCPU pointer into a VM pointer. */
-#define DBGFCPU_2_VM(pDbgfCpu) ((PVM)((uint8_t *)(pDbgfCpu) + (pDbgfCpu)->offVM))
+} DBGFUSERPERVM;
/**
- * The per CPU data for DBGF.
+ * The per-CPU DBGF data kept in the UVM.
*/
-typedef struct DBGFCPU
+typedef struct DBGFUSERPERVMCPU
{
- /** The offset into the VM structure.
- * @see DBGFCPU_2_VM(). */
- uint32_t offVM;
-
- /** Current active breakpoint (id).
- * This is ~0U if not active. It is set when a execution engine
- * encounters a breakpoint and returns VINF_EM_DBG_BREAKPOINT. This is
- * currently not used for REM breakpoints because of the lazy coupling
- * between VBox and REM. */
- uint32_t iActiveBp;
- /** Set if we're singlestepping in raw mode.
- * This is checked and cleared in the \#DB handler. */
- bool fSingleSteppingRaw;
-
- /** Padding the structure to 16 bytes. */
- bool afReserved[7];
-
/** The guest register set for this CPU. Can be NULL. */
R3PTRTYPE(struct DBGFREGSET *) pGuestRegSet;
/** The hypervisor register set for this CPU. Can be NULL. */
R3PTRTYPE(struct DBGFREGSET *) pHyperRegSet;
-} DBGFCPU;
-/** Pointer to DBGFCPU data. */
-typedef DBGFCPU *PDBGFCPU;
+} DBGFUSERPERVMCPU;
-int dbgfR3AsInit(PVM pVM);
-void dbgfR3AsTerm(PVM pVM);
-void dbgfR3AsRelocate(PVM pVM, RTGCUINTPTR offDelta);
+int dbgfR3AsInit(PUVM pUVM);
+void dbgfR3AsTerm(PUVM pUVM);
+void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta);
int dbgfR3BpInit(PVM pVM);
-int dbgfR3InfoInit(PVM pVM);
-int dbgfR3InfoTerm(PVM pVM);
-void dbgfR3OSTerm(PVM pVM);
-int dbgfR3RegInit(PVM pVM);
-void dbgfR3RegTerm(PVM pVM);
-int dbgfR3SymInit(PVM pVM);
-int dbgfR3SymTerm(PVM pVM);
+int dbgfR3InfoInit(PUVM pUVM);
+int dbgfR3InfoTerm(PUVM pUVM);
+void dbgfR3OSTerm(PUVM pUVM);
+int dbgfR3RegInit(PUVM pUVM);
+void dbgfR3RegTerm(PUVM pUVM);
int dbgfR3TraceInit(PVM pVM);
void dbgfR3TraceRelocate(PVM pVM);
void dbgfR3TraceTerm(PVM pVM);
diff --git a/src/VBox/VMM/include/EMHandleRCTmpl.h b/src/VBox/VMM/include/EMHandleRCTmpl.h
index 306e434c..ef54f34b 100644
--- a/src/VBox/VMM/include/EMHandleRCTmpl.h
+++ b/src/VBox/VMM/include/EMHandleRCTmpl.h
@@ -1,10 +1,10 @@
/* $Id: EMHandleRCTmpl.h $ */
/** @file
- * EM - emR3[Raw|Hwaccm]HandleRC template.
+ * EM - emR3[Raw|Hm]HandleRC template.
*/
/*
- * Copyright (C) 2006-2009 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -18,8 +18,13 @@
#ifndef ___EMHandleRCTmpl_h
#define ___EMHandleRCTmpl_h
+#if defined(EMHANDLERC_WITH_PATM) && defined(EMHANDLERC_WITH_HM)
+# error "Only one define"
+#endif
+
+
/**
- * Process a subset of the raw-mode and hwaccm return codes.
+ * Process a subset of the raw-mode and hm return codes.
*
* Since we have to share this with raw-mode single stepping, this inline
* function has been created to avoid code duplication.
@@ -34,8 +39,8 @@
*/
#ifdef EMHANDLERC_WITH_PATM
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
-#elif defined(EMHANDLERC_WITH_HWACCM)
-int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+#elif defined(EMHANDLERC_WITH_HM)
+int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
#endif
{
switch (rc)
@@ -46,7 +51,7 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
case VINF_SUCCESS:
break;
case VINF_EM_RESCHEDULE_RAW:
- case VINF_EM_RESCHEDULE_HWACC:
+ case VINF_EM_RESCHEDULE_HM:
case VINF_EM_RAW_INTERRUPT:
case VINF_EM_RAW_TO_R3:
case VINF_EM_RAW_TIMER_PENDING:
@@ -81,7 +86,7 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
*/
case VINF_PATM_PATCH_TRAP_PF:
case VINF_PATM_PATCH_INT3:
- rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
+ rc = emR3RawPatchTrap(pVM, pVCpu, pCtx, rc);
break;
case VINF_PATM_DUPLICATE_FUNCTION:
@@ -130,7 +135,7 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
* do here is to execute the pending forced actions.
*/
case VINF_PGM_SYNC_CR3:
- AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
+ AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
rc = VINF_SUCCESS;
break;
@@ -218,16 +223,16 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
break;
-#ifdef EMHANDLERC_WITH_HWACCM
+#ifdef EMHANDLERC_WITH_HM
/*
* (MM)IO intensive code block detected; fall back to the recompiler for better performance
*/
case VINF_EM_RAW_EMULATE_IO_BLOCK:
- rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
+ rc = HMR3EmulateIoBlock(pVM, pCtx);
break;
- case VINF_EM_HWACCM_PATCH_TPR_INSTR:
- rc = HWACCMR3PatchTprInstr(pVM, pVCpu, pCtx);
+ case VINF_EM_HM_PATCH_TPR_INSTR:
+ rc = HMR3PatchTprInstr(pVM, pVCpu, pCtx);
break;
#endif
@@ -288,7 +293,7 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
* Conflict in GDT, resync and continue.
*/
case VINF_SELM_SYNC_GDT:
- AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
+ AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
rc = VINF_SUCCESS;
break;
@@ -327,26 +332,30 @@ int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
case VERR_TRPM_DONT_PANIC:
case VERR_TRPM_PANIC:
case VERR_VMM_RING0_ASSERTION:
+ case VINF_EM_TRIPLE_FAULT:
case VERR_VMM_HYPER_CR3_MISMATCH:
case VERR_VMM_RING3_CALL_DISABLED:
case VERR_IEM_INSTR_NOT_IMPLEMENTED:
case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
break;
-#ifdef EMHANDLERC_WITH_HWACCM
+#ifdef EMHANDLERC_WITH_HM
/*
- * Up a level, after HwAccM have done some release logging.
+ * Up a level, after HM has done some release logging.
*/
case VERR_VMX_INVALID_VMCS_FIELD:
case VERR_VMX_INVALID_VMCS_PTR:
case VERR_VMX_INVALID_VMXON_PTR:
- case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
+ case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
case VERR_VMX_UNEXPECTED_EXCEPTION:
- case VERR_VMX_UNEXPECTED_EXIT_CODE:
+ case VERR_VMX_UNEXPECTED_EXIT:
case VERR_VMX_INVALID_GUEST_STATE:
case VERR_VMX_UNABLE_TO_START_VM:
- case VERR_VMX_UNABLE_TO_RESUME_VM:
- HWACCMR3CheckError(pVM, rc);
+ case VERR_SVM_UNKNOWN_EXIT:
+ case VERR_SVM_UNEXPECTED_EXIT:
+ case VERR_SVM_UNEXPECTED_PATCH_TYPE:
+ case VERR_SVM_UNEXPECTED_XCPT_EXIT:
+ HMR3CheckError(pVM, rc);
break;
/* Up a level; fatal */
diff --git a/src/VBox/VMM/include/EMInternal.h b/src/VBox/VMM/include/EMInternal.h
index 34606b33..11a7ac3c 100644
--- a/src/VBox/VMM/include/EMInternal.h
+++ b/src/VBox/VMM/include/EMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -38,7 +38,8 @@ RT_C_DECLS_BEGIN
*/
/** The saved state version. */
-#define EM_SAVED_STATE_VERSION 4
+#define EM_SAVED_STATE_VERSION 5
+#define EM_SAVED_STATE_VERSION_PRE_IEM 4
#define EM_SAVED_STATE_VERSION_PRE_MWAIT 3
#define EM_SAVED_STATE_VERSION_PRE_SMP 2
@@ -264,11 +265,15 @@ typedef struct EMSTATS
/** @name Privileged Instructions Ending Up In HC.
* @{ */
- STAMCOUNTER StatCli;
- STAMCOUNTER StatSti;
- STAMCOUNTER StatIn;
STAMCOUNTER StatIoRestarted;
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+ STAMCOUNTER StatIoIem;
+#else
+ STAMCOUNTER StatIn;
STAMCOUNTER StatOut;
+#endif
+ STAMCOUNTER StatCli;
+ STAMCOUNTER StatSti;
STAMCOUNTER StatInvlpg;
STAMCOUNTER StatHlt;
STAMCOUNTER StatMovReadCR[DISCREG_CR4 + 1];
@@ -307,6 +312,13 @@ typedef struct EM
* See EM2VM(). */
RTUINT offVM;
+ /** Whether IEM executes everything. */
+ bool fIemExecutesAll;
+ /** Whether a triple fault triggers a guru. */
+ bool fGuruOnTripleFault;
+ /** Alignment padding. */
+ bool afPadding[6];
+
/** Id of the VCPU that last executed code in the recompiler. */
VMCPUID idLastRemCpu;
@@ -326,10 +338,6 @@ typedef EM *PEM;
*/
typedef struct EMCPU
{
- /** Offset to the VM structure.
- * See EMCPU2VM(). */
- RTUINT offVMCPU;
-
/** Execution Manager State. */
EMSTATE volatile enmState;
@@ -343,11 +351,17 @@ typedef struct EMCPU
uint8_t u8Padding[3];
+ /** The number of instructions we've executed in IEM since switching to the
+ * EMSTATE_IEM_THEN_REM state. */
+ uint32_t cIemThenRemInstructions;
+
/** Inhibit interrupts for this instruction. Valid only when VM_FF_INHIBIT_INTERRUPTS is set. */
RTGCUINTPTR GCPtrInhibitInterrupts;
+#ifdef VBOX_WITH_RAW_MODE
/** Pointer to the PATM status structure. (R3 Ptr) */
R3PTRTYPE(PPATMGCSTATE) pPatmGCState;
+#endif
/** Pointer to the guest CPUM state. (R3 Ptr) */
R3PTRTYPE(PCPUMCTX) pCtx;
@@ -397,8 +411,10 @@ typedef struct EMCPU
STAMPROFILE StatForcedActions;
STAMPROFILE StatHalted;
STAMPROFILEADV StatCapped;
- STAMPROFILEADV StatHwAccEntry;
- STAMPROFILE StatHwAccExec;
+ STAMPROFILEADV StatHmEntry;
+ STAMPROFILE StatHmExec;
+ STAMPROFILE StatIEMEmu;
+ STAMPROFILE StatIEMThenREM;
STAMPROFILE StatREMEmu;
STAMPROFILE StatREMExec;
STAMPROFILE StatREMSync;
@@ -414,8 +430,8 @@ typedef struct EMCPU
STAMPROFILE StatIOEmu;
/** R3: Profiling of emR3RawPrivileged. */
STAMPROFILE StatPrivEmu;
- /** R3: Number of time emR3HwAccExecute is called. */
- STAMCOUNTER StatHwAccExecuteEntry;
+ /** R3: Number of time emR3HmExecute is called. */
+ STAMCOUNTER StatHmExecuteEntry;
/** More statistics (R3). */
R3PTRTYPE(PEMSTATS) pStatsR3;
@@ -440,11 +456,12 @@ typedef EMCPU *PEMCPU;
/** @} */
+int emR3InitDbg(PVM pVM);
-int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
-int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
@@ -452,6 +469,7 @@ int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
int emR3RawStep(PVM pVM, PVMCPU pVCpu);
int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations);
+bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu);
RT_C_DECLS_END
diff --git a/src/VBox/VMM/include/HMInternal.h b/src/VBox/VMM/include/HMInternal.h
new file mode 100644
index 00000000..fc1c953f
--- /dev/null
+++ b/src/VBox/VMM/include/HMInternal.h
@@ -0,0 +1,1002 @@
+/* $Id: HMInternal.h $ */
+/** @file
+ * HM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef ___HMInternal_h
+#define ___HMInternal_h
+
+#include <VBox/cdefs.h>
+#include <VBox/types.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/dis.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/cpum.h>
+#include <iprt/memobj.h>
+#include <iprt/cpuset.h>
+#include <iprt/mp.h>
+#include <iprt/avl.h>
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
+/* Enable 64 bits guest support. */
+# define VBOX_ENABLE_64_BITS_GUESTS
+#endif
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# define VMX_USE_CACHED_VMCS_ACCESSES
+#endif
+
+/** @def HM_PROFILE_EXIT_DISPATCH
+ * Enables profiling of the VM exit handler dispatching. */
+#if 0
+# define HM_PROFILE_EXIT_DISPATCH
+#endif
+
+/* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
+ * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
+ * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
+ * always auto load/store the KERNEL_GS_BASE MSR.
+ *
+ * Note: don't forget to update the assembly files while modifying this!
+ */
+/** @todo This define should always be in effect and the define itself removed
+ after 'sufficient' testing. */
+# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+
+RT_C_DECLS_BEGIN
+
+
+/** @defgroup grp_hm_int Internal
+ * @ingroup grp_hm
+ * @internal
+ * @{
+ */
+
+/** @def HMCPU_CF_CLEAR
+ * Clears a HM-context flag for the given VCPU.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlag The flag to clear.
+ */
+#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))
+
+/** @def HMCPU_CF_SET
+ * Sets a HM-context flag for the given VCPU.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlag The flag to set.
+ */
+#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))
+
+/** @def HMCPU_CF_IS_SET
+ * Checks if all the flags in the specified HM-context set is pending.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlag The flag to check.
+ */
+#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))
+
+/** @def HMCPU_CF_IS_PENDING
+ * Checks if one or more of the flags in the specified HM-context set are
+ * pending.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlags The flags to check for.
+ */
+#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))
+
+/** @def HMCPU_CF_IS_PENDING_ONLY
+ * Checks if -only- one or more of the specified HM-context flags is pending.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlags The flags to check for.
+ */
+#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))
+
+/** @def HMCPU_CF_IS_SET_ONLY
+ * Checks if -only- all the flags in the specified HM-context set are set.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlags The flags to check for.
+ */
+#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))
+
+/** @def HMCPU_CF_RESET_TO
+ * Resets the HM-context flags to the specified value.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param fFlags The new value.
+ */
+#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))
+
+/** @def HMCPU_CF_VALUE
+ * Returns the current HM-context flags value.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ */
+#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))
+
+
+/** Maximum number of exit reason statistics counters. */
+#define MAX_EXITREASON_STAT 0x100
+#define MASK_EXITREASON_STAT 0xff
+#define MASK_INJECT_IRQ_STAT 0xff
+
+/** @name HM changed flags.
+ * These flags are used to keep track of which important registers that
+ * have been changed since last they were reset.
+ * @{
+ */
+#define HM_CHANGED_GUEST_CR0 RT_BIT(0)
+#define HM_CHANGED_GUEST_CR3 RT_BIT(1)
+#define HM_CHANGED_GUEST_CR4 RT_BIT(2)
+#define HM_CHANGED_GUEST_GDTR RT_BIT(3)
+#define HM_CHANGED_GUEST_IDTR RT_BIT(4)
+#define HM_CHANGED_GUEST_LDTR RT_BIT(5)
+#define HM_CHANGED_GUEST_TR RT_BIT(6)
+#define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(7)
+#define HM_CHANGED_GUEST_DEBUG RT_BIT(8)
+#define HM_CHANGED_GUEST_RIP RT_BIT(9)
+#define HM_CHANGED_GUEST_RSP RT_BIT(10)
+#define HM_CHANGED_GUEST_RFLAGS RT_BIT(11)
+#define HM_CHANGED_GUEST_CR2 RT_BIT(12)
+#define HM_CHANGED_GUEST_SYSENTER_CS_MSR RT_BIT(13)
+#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR RT_BIT(14)
+#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR RT_BIT(15)
+/* VT-x specific state. */
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(16)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(17)
+#define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(18)
+#define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(19)
+#define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(20)
+/* AMD-V specific state. */
+#define HM_CHANGED_SVM_GUEST_EFER_MSR RT_BIT(16)
+#define HM_CHANGED_SVM_GUEST_APIC_STATE RT_BIT(17)
+#define HM_CHANGED_SVM_RESERVED1 RT_BIT(18)
+#define HM_CHANGED_SVM_RESERVED2 RT_BIT(19)
+#define HM_CHANGED_SVM_RESERVED3 RT_BIT(20)
+
+#define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_CR0 \
+ | HM_CHANGED_GUEST_CR3 \
+ | HM_CHANGED_GUEST_CR4 \
+ | HM_CHANGED_GUEST_GDTR \
+ | HM_CHANGED_GUEST_IDTR \
+ | HM_CHANGED_GUEST_LDTR \
+ | HM_CHANGED_GUEST_TR \
+ | HM_CHANGED_GUEST_SEGMENT_REGS \
+ | HM_CHANGED_GUEST_DEBUG \
+ | HM_CHANGED_GUEST_RIP \
+ | HM_CHANGED_GUEST_RSP \
+ | HM_CHANGED_GUEST_RFLAGS \
+ | HM_CHANGED_GUEST_CR2 \
+ | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
+ | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
+ | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
+ | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
+ | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
+ | HM_CHANGED_VMX_GUEST_APIC_STATE \
+ | HM_CHANGED_VMX_ENTRY_CTLS \
+ | HM_CHANGED_VMX_EXIT_CTLS)
+
+#define HM_CHANGED_HOST_CONTEXT RT_BIT(21)
+
+/* Bits shared between host and guest. */
+#define HM_CHANGED_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_CR0 \
+ | HM_CHANGED_GUEST_DEBUG)
+/** @} */
+
+/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
+#define HM_MAX_TLB_SHOOTDOWN_PAGES 8
+
+/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
+#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
+/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
+#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
+/** Total guest mapped memory needed. */
+#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
+
+/** Enable for TPR guest patching. */
+#define VBOX_HM_WITH_GUEST_PATCHING
+
+/** HM SSM version
+ */
+#ifdef VBOX_HM_WITH_GUEST_PATCHING
+# define HM_SSM_VERSION 5
+# define HM_SSM_VERSION_NO_PATCHING 4
+#else
+# define HM_SSM_VERSION 4
+# define HM_SSM_VERSION_NO_PATCHING 4
+#endif
+#define HM_SSM_VERSION_2_0_X 3
+
+/**
+ * Global per-cpu information. (host)
+ */
+typedef struct HMGLOBALCPUINFO
+{
+ /** The CPU ID. */
+ RTCPUID idCpu;
+ /** The memory object */
+ RTR0MEMOBJ hMemObj;
+ /** Current ASID (AMD-V) / VPID (Intel). */
+ uint32_t uCurrentAsid;
+ /** TLB flush count. */
+ uint32_t cTlbFlushes;
+ /** Whether to flush each new ASID/VPID before use. */
+ bool fFlushAsidBeforeUse;
+ /** Configured for VT-x or AMD-V. */
+ bool fConfigured;
+ /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
+ bool fIgnoreAMDVInUseError;
+ /** In use by our code. (for power suspend) */
+ volatile bool fInUse;
+} HMGLOBALCPUINFO;
+/** Pointer to the per-cpu global information. */
+typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;
+
+typedef enum
+{
+ HMPENDINGIO_INVALID = 0,
+ HMPENDINGIO_PORT_READ,
+ HMPENDINGIO_PORT_WRITE,
+ HMPENDINGIO_STRING_READ,
+ HMPENDINGIO_STRING_WRITE,
+ /** The usual 32-bit paranoia. */
+ HMPENDINGIO_32BIT_HACK = 0x7fffffff
+} HMPENDINGIO;
+
+
+typedef enum
+{
+ HMTPRINSTR_INVALID,
+ HMTPRINSTR_READ,
+ HMTPRINSTR_READ_SHR4,
+ HMTPRINSTR_WRITE_REG,
+ HMTPRINSTR_WRITE_IMM,
+ HMTPRINSTR_JUMP_REPLACEMENT,
+ /** The usual 32-bit paranoia. */
+ HMTPRINSTR_32BIT_HACK = 0x7fffffff
+} HMTPRINSTR;
+
+typedef struct
+{
+ /** The key is the address of patched instruction. (32 bits GC ptr) */
+ AVLOU32NODECORE Core;
+ /** Original opcode. */
+ uint8_t aOpcode[16];
+ /** Instruction size. */
+ uint32_t cbOp;
+ /** Replacement opcode. */
+ uint8_t aNewOpcode[16];
+ /** Replacement instruction size. */
+ uint32_t cbNewOp;
+ /** Instruction type. */
+ HMTPRINSTR enmType;
+ /** Source operand. */
+ uint32_t uSrcOperand;
+ /** Destination operand. */
+ uint32_t uDstOperand;
+ /** Number of times the instruction caused a fault. */
+ uint32_t cFaults;
+ /** Patch address of the jump replacement. */
+ RTGCPTR32 pJumpTarget;
+} HMTPRPATCH;
+/** Pointer to HMTPRPATCH. */
+typedef HMTPRPATCH *PHMTPRPATCH;
+
+/**
+ * Switcher function, HC to the special 64-bit RC.
+ *
+ * @param pVM Pointer to the VM.
+ * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
+ * @returns Return code indicating the action to take.
+ */
+typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
+/** Pointer to switcher function. */
+typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
+
+/**
+ * HM VM Instance data.
+ * Changes to this must checked against the padding of the hm union in VM!
+ */
+typedef struct HM
+{
+ /** Set when we've initialized VMX or SVM. */
+ bool fInitialized;
+
+ /** Set if nested paging is enabled. */
+ bool fNestedPaging;
+
+ /** Set if nested paging is allowed. */
+ bool fAllowNestedPaging;
+
+ /** Set if large pages are enabled (requires nested paging). */
+ bool fLargePages;
+
+ /** Set if we can support 64-bit guests or not. */
+ bool fAllow64BitGuests;
+
+ /** Set if an IO-APIC is configured for this VM. */
+ bool fHasIoApic;
+
+ /** Set when TPR patching is allowed. */
+ bool fTRPPatchingAllowed;
+
+ /** Set when we initialize VT-x or AMD-V once for all CPUs. */
+ bool fGlobalInit;
+
+ /** Set when TPR patching is active. */
+ bool fTPRPatchingActive;
+ bool u8Alignment[7];
+
+ /** Maximum ASID allowed. */
+ uint32_t uMaxAsid;
+
+ /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
+ * This number is set much higher when RTThreadPreemptIsPending is reliable. */
+ uint32_t cMaxResumeLoops;
+
+ /** Guest allocated memory for patching purposes. */
+ RTGCPTR pGuestPatchMem;
+ /** Current free pointer inside the patch block. */
+ RTGCPTR pFreeGuestPatchMem;
+ /** Size of the guest patch memory block. */
+ uint32_t cbGuestPatchMem;
+ uint32_t uPadding1;
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ /** 32 to 64 bits switcher entrypoint. */
+ R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
+ RTR0PTR uPadding2;
+#endif
+
+ struct
+ {
+ /** Set by the ring-0 side of HM to indicate VMX is supported by the
+ * CPU. */
+ bool fSupported;
+
+ /** Set when we've enabled VMX. */
+ bool fEnabled;
+
+ /** Set if VPID is supported. */
+ bool fVpid;
+
+ /** Set if VT-x VPID is allowed. */
+ bool fAllowVpid;
+
+ /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
+ bool fUnrestrictedGuest;
+
+ /** Set if unrestricted guest execution is allowed to be used. */
+ bool fAllowUnrestricted;
+
+ /** Whether we're using the preemption timer or not. */
+ bool fUsePreemptTimer;
+ /** The shift mask employed by the VMX-Preemption timer. */
+ uint8_t cPreemptTimerShift;
+
+ /** Virtual address of the TSS page used for real mode emulation. */
+ R3PTRTYPE(PVBOXTSS) pRealModeTSS;
+
+ /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
+ R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
+
+ /** R0 memory object for the APIC-access page. */
+ RTR0MEMOBJ hMemObjApicAccess;
+ /** Physical address of the APIC-access page. */
+ RTHCPHYS HCPhysApicAccess;
+ /** Virtual address of the APIC-access page. */
+ R0PTRTYPE(uint8_t *) pbApicAccess;
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ RTR0MEMOBJ hMemObjScratch;
+ RTHCPHYS HCPhysScratch;
+ R0PTRTYPE(uint8_t *) pbScratch;
+#endif
+
+ /** Internal Id of which flush-handler to use for tagged-TLB entries. */
+ unsigned uFlushTaggedTlb;
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
+ uint32_t u32Alignment;
+#endif
+ /** Host CR4 value (set by ring-0 VMX init) */
+ uint64_t u64HostCr4;
+
+ /** Host EFER value (set by ring-0 VMX init) */
+ uint64_t u64HostEfer;
+
+ /** VMX MSR values */
+ VMXMSRS Msrs;
+
+ /** Flush types for invept & invvpid; they depend on capabilities. */
+ VMX_FLUSH_EPT enmFlushEpt;
+ VMX_FLUSH_VPID enmFlushVpid;
+ } vmx;
+
+ struct
+ {
+ /** Set by the ring-0 side of HM to indicate SVM is supported by the
+ * CPU. */
+ bool fSupported;
+ /** Set when we've enabled SVM. */
+ bool fEnabled;
+ /** Set if erratum 170 affects the AMD cpu. */
+ bool fAlwaysFlushTLB;
+ /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
+ bool fIgnoreInUseError;
+
+ /** R0 memory object for the IO bitmap (12kb). */
+ RTR0MEMOBJ hMemObjIOBitmap;
+ /** Physical address of the IO bitmap (12kb). */
+ RTHCPHYS HCPhysIOBitmap;
+ /** Virtual address of the IO bitmap. */
+ R0PTRTYPE(void *) pvIOBitmap;
+
+ /* HWCR MSR (for diagnostics) */
+ uint64_t u64MsrHwcr;
+
+ /** SVM revision. */
+ uint32_t u32Rev;
+
+ /** SVM feature bits from cpuid 0x8000000a */
+ uint32_t u32Features;
+ } svm;
+
+ /**
+ * AVL tree with all patches (active or disabled) sorted by guest instruction address
+ */
+ AVLOU32TREE PatchTree;
+ uint32_t cPatches;
+ HMTPRPATCH aPatches[64];
+
+ struct
+ {
+ uint32_t u32AMDFeatureECX;
+ uint32_t u32AMDFeatureEDX;
+ } cpuid;
+
+ /** Saved error from detection */
+ int32_t lLastError;
+
+ /** HMR0Init was run */
+ bool fHMR0Init;
+ bool u8Alignment1[7];
+
+ STAMCOUNTER StatTprPatchSuccess;
+ STAMCOUNTER StatTprPatchFailure;
+ STAMCOUNTER StatTprReplaceSuccess;
+ STAMCOUNTER StatTprReplaceFailure;
+} HM;
+/** Pointer to HM VM instance data. */
+typedef HM *PHM;
+
+/* Maximum number of cached entries. */
+#define VMCSCACHE_MAX_ENTRY 128
+
+/* Structure for storing read and write VMCS actions. */
+typedef struct VMCSCACHE
+{
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ /* Magic marker for searching in crash dumps. */
+ uint8_t aMagic[16];
+ uint64_t uMagic;
+ uint64_t u64TimeEntry;
+ uint64_t u64TimeSwitch;
+ uint64_t cResume;
+ uint64_t interPD;
+ uint64_t pSwitcher;
+ uint32_t uPos;
+ uint32_t idCpu;
+#endif
+ /* CR2 is saved here for EPT syncing. */
+ uint64_t cr2;
+ struct
+ {
+ uint32_t cValidEntries;
+ uint32_t uAlignment;
+ uint32_t aField[VMCSCACHE_MAX_ENTRY];
+ uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
+ } Write;
+ struct
+ {
+ uint32_t cValidEntries;
+ uint32_t uAlignment;
+ uint32_t aField[VMCSCACHE_MAX_ENTRY];
+ uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
+ } Read;
+#ifdef VBOX_STRICT
+ struct
+ {
+ RTHCPHYS HCPhysCpuPage;
+ RTHCPHYS HCPhysVmcs;
+ RTGCPTR pCache;
+ RTGCPTR pCtx;
+ } TestIn;
+ struct
+ {
+ RTHCPHYS HCPhysVmcs;
+ RTGCPTR pCache;
+ RTGCPTR pCtx;
+ uint64_t eflags;
+ uint64_t cr8;
+ } TestOut;
+ struct
+ {
+ uint64_t param1;
+ uint64_t param2;
+ uint64_t param3;
+ uint64_t param4;
+ } ScratchPad;
+#endif
+} VMCSCACHE;
+/** Pointer to VMCSCACHE. */
+typedef VMCSCACHE *PVMCSCACHE;
+
+/** VMX StartVM function. */
+typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
+/** Pointer to a VMX StartVM function. */
+typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
+
+/** SVM VMRun function. */
+typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
+/** Pointer to a SVM VMRun function. */
+typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
+
+/**
+ * HM VMCPU Instance data.
+ */
+typedef struct HMCPU
+{
+ /** Set if we need to flush the TLB during the world switch. */
+ bool fForceTLBFlush;
+ /** Set when we're using VT-x or AMD-V at that moment. */
+ bool fActive;
+ /** Set when the TLB has been checked until we return from the world switch. */
+ volatile bool fCheckedTLBFlush;
+ /** Whether we're executing a single instruction. */
+ bool fSingleInstruction;
+ /** Set if we need to clear the trap flag because of single stepping. */
+ bool fClearTrapFlag;
+ /** Whether we've completed the inner HM leave function. */
+ bool fLeaveDone;
+ /** Whether we're using the hyper DR7 or guest DR7. */
+ bool fUsingHyperDR7;
+ uint8_t abAlignment[1];
+
+ /** World switch exit counter. */
+ volatile uint32_t cWorldSwitchExits;
+ /** HM_CHANGED_* flags. */
+ volatile uint32_t fContextUseFlags;
+ /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
+ * time). */
+ RTCPUID idLastCpu;
+ /** TLB flush count. */
+ uint32_t cTlbFlushes;
+ /** Current ASID in use by the VM. */
+ uint32_t uCurrentAsid;
+ /** An additional error code used for some gurus. */
+ uint32_t u32HMError;
+ /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
+ uint64_t u64HostTscAux;
+
+ struct
+ {
+ /** Physical address of the VM control structure (VMCS). */
+ RTHCPHYS HCPhysVmcs;
+ /** R0 memory object for the VM control structure (VMCS). */
+ RTR0MEMOBJ hMemObjVmcs;
+ /** Virtual address of the VM control structure (VMCS). */
+ R0PTRTYPE(void *) pvVmcs;
+ /** Ring 0 handlers for VT-x. */
+ PFNHMVMXSTARTVM pfnStartVM;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Alignment1;
+#endif
+
+ /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
+ uint32_t u32PinCtls;
+ /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
+ uint32_t u32ProcCtls;
+ /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
+ uint32_t u32ProcCtls2;
+ /** Current VMX_VMCS32_CTRL_EXIT. */
+ uint32_t u32ExitCtls;
+ /** Current VMX_VMCS32_CTRL_ENTRY. */
+ uint32_t u32EntryCtls;
+
+ /** Physical address of the virtual APIC page for TPR caching. */
+ RTHCPHYS HCPhysVirtApic;
+ /** R0 memory object for the virtual APIC page for TPR caching. */
+ RTR0MEMOBJ hMemObjVirtApic;
+ /** Virtual address of the virtual APIC page for TPR caching. */
+ R0PTRTYPE(uint8_t *) pbVirtApic;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Alignment2;
+#endif
+
+ /** Current CR0 mask. */
+ uint32_t u32CR0Mask;
+ /** Current CR4 mask. */
+ uint32_t u32CR4Mask;
+ /** Current exception bitmap. */
+ uint32_t u32XcptBitmap;
+ /** The updated-guest-state mask. */
+ volatile uint32_t fUpdatedGuestState;
+ /** Current EPTP. */
+ RTHCPHYS HCPhysEPTP;
+
+ /** Physical address of the MSR bitmap. */
+ RTHCPHYS HCPhysMsrBitmap;
+ /** R0 memory object for the MSR bitmap. */
+ RTR0MEMOBJ hMemObjMsrBitmap;
+ /** Virtual address of the MSR bitmap. */
+ R0PTRTYPE(void *) pvMsrBitmap;
+
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+ /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
+ * for guest MSRs). */
+ RTHCPHYS HCPhysGuestMsr;
+ /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
+ * (used for guest MSRs). */
+ RTR0MEMOBJ hMemObjGuestMsr;
+ /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
+ * for guest MSRs). */
+ R0PTRTYPE(void *) pvGuestMsr;
+
+ /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
+ RTHCPHYS HCPhysHostMsr;
+ /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
+ RTR0MEMOBJ hMemObjHostMsr;
+ /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
+ R0PTRTYPE(void *) pvHostMsr;
+
+ /** Number of automatically loaded/restored guest MSRs during the world switch. */
+ uint32_t cGuestMsrs;
+ uint32_t uAlignment;
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
+
+ /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
+ uint64_t u64MsrApicBase;
+ /** Last use TSC offset value. (cached) */
+ uint64_t u64TSCOffset;
+
+ /** VMCS cache. */
+ VMCSCACHE VMCSCache;
+
+ /** Real-mode emulation state. */
+ struct
+ {
+ X86DESCATTR AttrCS;
+ X86DESCATTR AttrDS;
+ X86DESCATTR AttrES;
+ X86DESCATTR AttrFS;
+ X86DESCATTR AttrGS;
+ X86DESCATTR AttrSS;
+ X86EFLAGS Eflags;
+ uint32_t fRealOnV86Active;
+ } RealMode;
+
+ struct
+ {
+ uint64_t u64VMCSPhys;
+ uint32_t u32VMCSRevision;
+ uint32_t u32InstrError;
+ uint32_t u32ExitReason;
+ RTCPUID idEnteredCpu;
+ RTCPUID idCurrentCpu;
+ uint32_t u32Padding;
+ } LastError;
+
+ /** State of the VMCS. */
+ uint32_t uVmcsState;
+ /** Which host-state bits to restore before being preempted. */
+ uint32_t fRestoreHostFlags;
+ /** The host-state restoration structure. */
+ VMXRESTOREHOST RestoreHost;
+ /** Set if guest was executing in real mode (extra checks). */
+ bool fWasInRealMode;
+ /** Padding. */
+ uint32_t u32Padding;
+ } vmx;
+
+ struct
+ {
+ /** R0 memory object for the host VMCB which holds additional host-state. */
+ RTR0MEMOBJ hMemObjVmcbHost;
+ /** Physical address of the host VMCB which holds additional host-state. */
+ RTHCPHYS HCPhysVmcbHost;
+ /** Virtual address of the host VMCB which holds additional host-state. */
+ R0PTRTYPE(void *) pvVmcbHost;
+
+ /** R0 memory object for the guest VMCB. */
+ RTR0MEMOBJ hMemObjVmcb;
+ /** Physical address of the guest VMCB. */
+ RTHCPHYS HCPhysVmcb;
+ /** Virtual address of the guest VMCB. */
+ R0PTRTYPE(void *) pvVmcb;
+
+ /** Ring 0 handlers for AMD-V. */
+ PFNHMSVMVMRUN pfnVMRun;
+
+ /** R0 memory object for the MSR bitmap (8 KB). */
+ RTR0MEMOBJ hMemObjMsrBitmap;
+ /** Physical address of the MSR bitmap (8 KB). */
+ RTHCPHYS HCPhysMsrBitmap;
+ /** Virtual address of the MSR bitmap. */
+ R0PTRTYPE(void *) pvMsrBitmap;
+
+ /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
+ * we should check if the VTPR changed on every VM-exit. */
+ bool fSyncVTpr;
+ uint8_t u8Align[7];
+
+ /** Alignment padding. */
+ uint32_t u32Padding;
+ } svm;
+
+ /** Event injection state. */
+ struct
+ {
+ uint32_t fPending;
+ uint32_t u32ErrCode;
+ uint32_t cbInstr;
+ uint32_t u32Padding; /**< Explicit alignment padding. */
+ uint64_t u64IntInfo;
+ RTGCUINTPTR GCPtrFaultAddress;
+ } Event;
+
+ /** IO Block emulation state. */
+ struct
+ {
+ bool fEnabled;
+ uint8_t u8Align[7];
+
+ /** RIP at the start of the io code we wish to emulate in the recompiler. */
+ RTGCPTR GCPtrFunctionEip;
+
+ uint64_t cr0;
+ } EmulateIoBlock;
+
+ struct
+ {
+ /** Pending IO operation type. */
+ HMPENDINGIO enmType;
+ uint32_t uPadding;
+ RTGCPTR GCPtrRip;
+ RTGCPTR GCPtrRipNext;
+ union
+ {
+ struct
+ {
+ uint32_t uPort;
+ uint32_t uAndVal;
+ uint32_t cbSize;
+ } Port;
+ uint64_t aRaw[2];
+ } s;
+ } PendingIO;
+
+ /** The PAE PDPEs used with Nested Paging (only valid when
+ * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
+ X86PDPE aPdpes[4];
+
+ /** Current shadow paging mode. */
+ PGMMODE enmShadowMode;
+
+ /** The CPU ID of the CPU currently owning the VMCS. Set in
+ * HMR0Enter and cleared in HMR0Leave. */
+ RTCPUID idEnteredCpu;
+
+ /** To keep track of pending TLB shootdown pages. (SMP guest only) */
+ struct
+ {
+ RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
+ uint32_t cPages;
+ uint32_t u32Padding; /**< Explicit alignment padding. */
+ } TlbShootdown;
+
+ /** For saving stack space, the disassembler state is allocated here instead of
+ * on the stack. */
+ DISCPUSTATE DisState;
+
+ STAMPROFILEADV StatEntry;
+ STAMPROFILEADV StatExit1;
+ STAMPROFILEADV StatExit2;
+ STAMPROFILEADV StatExitIO;
+ STAMPROFILEADV StatExitMovCRx;
+ STAMPROFILEADV StatExitXcptNmi;
+ STAMPROFILEADV StatLoadGuestState;
+ STAMPROFILEADV StatInGC;
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ STAMPROFILEADV StatWorldSwitch3264;
+#endif
+ STAMPROFILEADV StatPoke;
+ STAMPROFILEADV StatSpinPoke;
+ STAMPROFILEADV StatSpinPokeFailed;
+
+ STAMCOUNTER StatInjectInterrupt;
+ STAMCOUNTER StatInjectXcpt;
+ STAMCOUNTER StatInjectPendingReflect;
+
+ STAMCOUNTER StatExitAll;
+ STAMCOUNTER StatExitShadowNM;
+ STAMCOUNTER StatExitGuestNM;
+ STAMCOUNTER StatExitShadowPF; /* Misleading, currently used for MMIO #PFs as well. */
+ STAMCOUNTER StatExitShadowPFEM;
+ STAMCOUNTER StatExitGuestPF;
+ STAMCOUNTER StatExitGuestUD;
+ STAMCOUNTER StatExitGuestSS;
+ STAMCOUNTER StatExitGuestNP;
+ STAMCOUNTER StatExitGuestGP;
+ STAMCOUNTER StatExitGuestDE;
+ STAMCOUNTER StatExitGuestDB;
+ STAMCOUNTER StatExitGuestMF;
+ STAMCOUNTER StatExitGuestBP;
+ STAMCOUNTER StatExitGuestXF;
+ STAMCOUNTER StatExitGuestXcpUnk;
+ STAMCOUNTER StatExitInvlpg;
+ STAMCOUNTER StatExitInvd;
+ STAMCOUNTER StatExitWbinvd;
+ STAMCOUNTER StatExitPause;
+ STAMCOUNTER StatExitCpuid;
+ STAMCOUNTER StatExitRdtsc;
+ STAMCOUNTER StatExitRdtscp;
+ STAMCOUNTER StatExitRdpmc;
+ STAMCOUNTER StatExitRdrand;
+ STAMCOUNTER StatExitCli;
+ STAMCOUNTER StatExitSti;
+ STAMCOUNTER StatExitPushf;
+ STAMCOUNTER StatExitPopf;
+ STAMCOUNTER StatExitIret;
+ STAMCOUNTER StatExitInt;
+ STAMCOUNTER StatExitCRxWrite[16];
+ STAMCOUNTER StatExitCRxRead[16];
+ STAMCOUNTER StatExitDRxWrite;
+ STAMCOUNTER StatExitDRxRead;
+ STAMCOUNTER StatExitRdmsr;
+ STAMCOUNTER StatExitWrmsr;
+ STAMCOUNTER StatExitClts;
+ STAMCOUNTER StatExitXdtrAccess;
+ STAMCOUNTER StatExitHlt;
+ STAMCOUNTER StatExitMwait;
+ STAMCOUNTER StatExitMonitor;
+ STAMCOUNTER StatExitLmsw;
+ STAMCOUNTER StatExitIOWrite;
+ STAMCOUNTER StatExitIORead;
+ STAMCOUNTER StatExitIOStringWrite;
+ STAMCOUNTER StatExitIOStringRead;
+ STAMCOUNTER StatExitIntWindow;
+ STAMCOUNTER StatExitMaxResume;
+ STAMCOUNTER StatExitExtInt;
+ STAMCOUNTER StatExitHostNmiInGC;
+ STAMCOUNTER StatExitPreemptTimer;
+ STAMCOUNTER StatExitTprBelowThreshold;
+ STAMCOUNTER StatExitTaskSwitch;
+ STAMCOUNTER StatExitMtf;
+ STAMCOUNTER StatExitApicAccess;
+ STAMCOUNTER StatPendingHostIrq;
+
+ STAMCOUNTER StatPreemptPreempting;
+ STAMCOUNTER StatPreemptSaveHostState;
+
+ STAMCOUNTER StatFlushPage;
+ STAMCOUNTER StatFlushPageManual;
+ STAMCOUNTER StatFlushPhysPageManual;
+ STAMCOUNTER StatFlushTlb;
+ STAMCOUNTER StatFlushTlbManual;
+ STAMCOUNTER StatFlushTlbWorldSwitch;
+ STAMCOUNTER StatNoFlushTlbWorldSwitch;
+ STAMCOUNTER StatFlushEntire;
+ STAMCOUNTER StatFlushAsid;
+ STAMCOUNTER StatFlushNestedPaging;
+ STAMCOUNTER StatFlushTlbInvlpgVirt;
+ STAMCOUNTER StatFlushTlbInvlpgPhys;
+ STAMCOUNTER StatTlbShootdown;
+ STAMCOUNTER StatTlbShootdownFlush;
+
+ STAMCOUNTER StatSwitchGuestIrq;
+ STAMCOUNTER StatSwitchHmToR3FF;
+ STAMCOUNTER StatSwitchExitToR3;
+ STAMCOUNTER StatSwitchLongJmpToR3;
+
+ STAMCOUNTER StatTscOffset;
+ STAMCOUNTER StatTscIntercept;
+ STAMCOUNTER StatTscInterceptOverFlow;
+
+ STAMCOUNTER StatExitReasonNpf;
+ STAMCOUNTER StatDRxArmed;
+ STAMCOUNTER StatDRxContextSwitch;
+ STAMCOUNTER StatDRxIoCheck;
+
+ STAMCOUNTER StatLoadMinimal;
+ STAMCOUNTER StatLoadFull;
+
+ STAMCOUNTER StatVmxCheckBadRmSelBase;
+ STAMCOUNTER StatVmxCheckBadRmSelLimit;
+ STAMCOUNTER StatVmxCheckRmOk;
+
+ STAMCOUNTER StatVmxCheckBadSel;
+ STAMCOUNTER StatVmxCheckBadRpl;
+ STAMCOUNTER StatVmxCheckBadLdt;
+ STAMCOUNTER StatVmxCheckBadTr;
+ STAMCOUNTER StatVmxCheckPmOk;
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ STAMCOUNTER StatFpu64SwitchBack;
+ STAMCOUNTER StatDebug64SwitchBack;
+#endif
+
+#ifdef VBOX_WITH_STATISTICS
+ R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
+ R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
+ R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
+ R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
+#endif
+#ifdef HM_PROFILE_EXIT_DISPATCH
+ STAMPROFILEADV StatExitDispatch;
+#endif
+} HMCPU;
+/** Pointer to HM VMCPU instance data. */
+typedef HMCPU *PHMCPU;
+
+
+#ifdef IN_RING0
+
+VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
+VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);
+
+
+#ifdef VBOX_STRICT
+VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
+#else
+# define HMDumpRegs(a, b ,c) do { } while (0)
+# define HMR0DumpDescriptor(a, b, c) do { } while (0)
+#endif
+
+# ifdef VBOX_WITH_KERNEL_USING_XMM
+DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
+DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
+# endif
+
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+/**
+ * Gets 64-bit GDTR and IDTR on darwin.
+ * @param pGdtr Where to store the 64-bit GDTR.
+ * @param pIdtr Where to store the 64-bit IDTR.
+ */
+DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
+
+/**
+ * Gets 64-bit CR3 on darwin.
+ * @returns CR3
+ */
+DECLASM(uint64_t) HMR0Get64bitCR3(void);
+# endif
+
+#endif /* IN_RING0 */
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif
+
diff --git a/src/VBox/VMM/include/HWACCMInternal.mac b/src/VBox/VMM/include/HMInternal.mac
index f8eb76d0..02ae3315 100644
--- a/src/VBox/VMM/include/HWACCMInternal.mac
+++ b/src/VBox/VMM/include/HMInternal.mac
@@ -1,9 +1,9 @@
-;$Id: HWACCMInternal.mac $
+;$Id: HMInternal.mac $
;; @file
-; HWACCM - Internal header file.
+; HM - Internal header file.
;
;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -14,7 +14,12 @@
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
-%define VMX_USE_CACHED_VMCS_ACCESSES
+%if HC_ARCH_BITS == 32
+ %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
+ %define VMX_USE_CACHED_VMCS_ACCESSES
+ %endif
+%endif
+
%define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
;Maximum number of cached entries.
@@ -42,12 +47,12 @@ struc VMCSCACHE
.Read.uAlignment resd 1
.Read.aField resd VMCSCACHE_MAX_ENTRY
.Read.aFieldVal resq VMCSCACHE_MAX_ENTRY
-%ifdef DEBUG
+%ifdef VBOX_STRICT
.TestIn.HCPhysCpuPage resq 1
- .TestIn.HCPhysVMCS resq 1
+ .TestIn.HCPhysVmcs resq 1
.TestIn.pCache resq 1
.TestIn.pCtx resq 1
- .TestOut.HCPhysVMCS resq 1
+ .TestOut.HCPhysVmcs resq 1
.TestOut.pCache resq 1
.TestOut.pCtx resq 1
.TestOut.eflags resq 1
diff --git a/src/VBox/VMM/include/HWACCMInternal.h b/src/VBox/VMM/include/HWACCMInternal.h
deleted file mode 100644
index 8d41fa89..00000000
--- a/src/VBox/VMM/include/HWACCMInternal.h
+++ /dev/null
@@ -1,906 +0,0 @@
-/* $Id: HWACCMInternal.h $ */
-/** @file
- * HM - Internal header file.
- */
-
-/*
- * Copyright (C) 2006-2012 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-#ifndef ___HWACCMInternal_h
-#define ___HWACCMInternal_h
-
-#include <VBox/cdefs.h>
-#include <VBox/types.h>
-#include <VBox/vmm/em.h>
-#include <VBox/vmm/stam.h>
-#include <VBox/dis.h>
-#include <VBox/vmm/hwaccm.h>
-#include <VBox/vmm/hwacc_vmx.h>
-#include <VBox/vmm/pgm.h>
-#include <VBox/vmm/cpum.h>
-#include <iprt/memobj.h>
-#include <iprt/cpuset.h>
-#include <iprt/mp.h>
-#include <iprt/avl.h>
-
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
-/* Enable 64 bits guest support. */
-# define VBOX_ENABLE_64_BITS_GUESTS
-#endif
-
-#define VMX_USE_CACHED_VMCS_ACCESSES
-#define HWACCM_VMX_EMULATE_REALMODE
-
-/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
- * handle this MSR manually. See @bugref{6208}. This is clearly visible while
- * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
- *
- * Note: don't forget to update the assembly files while modifying this!
- */
-# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-
-RT_C_DECLS_BEGIN
-
-
-/** @defgroup grp_hwaccm_int Internal
- * @ingroup grp_hwaccm
- * @internal
- * @{
- */
-
-
-/** Maximum number of exit reason statistics counters. */
-#define MAX_EXITREASON_STAT 0x100
-#define MASK_EXITREASON_STAT 0xff
-#define MASK_INJECT_IRQ_STAT 0xff
-
-/** @name Changed flags
- * These flags are used to keep track of which important registers that
- * have been changed since last they were reset.
- * @{
- */
-#define HWACCM_CHANGED_GUEST_FPU RT_BIT(0)
-#define HWACCM_CHANGED_GUEST_CR0 RT_BIT(1)
-#define HWACCM_CHANGED_GUEST_CR3 RT_BIT(2)
-#define HWACCM_CHANGED_GUEST_CR4 RT_BIT(3)
-#define HWACCM_CHANGED_GUEST_GDTR RT_BIT(4)
-#define HWACCM_CHANGED_GUEST_IDTR RT_BIT(5)
-#define HWACCM_CHANGED_GUEST_LDTR RT_BIT(6)
-#define HWACCM_CHANGED_GUEST_TR RT_BIT(7)
-#define HWACCM_CHANGED_GUEST_MSR RT_BIT(8)
-#define HWACCM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9)
-#define HWACCM_CHANGED_GUEST_DEBUG RT_BIT(10)
-#define HWACCM_CHANGED_HOST_CONTEXT RT_BIT(11)
-
-#define HWACCM_CHANGED_ALL ( HWACCM_CHANGED_GUEST_SEGMENT_REGS \
- | HWACCM_CHANGED_GUEST_CR0 \
- | HWACCM_CHANGED_GUEST_CR3 \
- | HWACCM_CHANGED_GUEST_CR4 \
- | HWACCM_CHANGED_GUEST_GDTR \
- | HWACCM_CHANGED_GUEST_IDTR \
- | HWACCM_CHANGED_GUEST_LDTR \
- | HWACCM_CHANGED_GUEST_TR \
- | HWACCM_CHANGED_GUEST_MSR \
- | HWACCM_CHANGED_GUEST_FPU \
- | HWACCM_CHANGED_GUEST_DEBUG \
- | HWACCM_CHANGED_HOST_CONTEXT)
-
-#define HWACCM_CHANGED_ALL_GUEST ( HWACCM_CHANGED_GUEST_SEGMENT_REGS \
- | HWACCM_CHANGED_GUEST_CR0 \
- | HWACCM_CHANGED_GUEST_CR3 \
- | HWACCM_CHANGED_GUEST_CR4 \
- | HWACCM_CHANGED_GUEST_GDTR \
- | HWACCM_CHANGED_GUEST_IDTR \
- | HWACCM_CHANGED_GUEST_LDTR \
- | HWACCM_CHANGED_GUEST_TR \
- | HWACCM_CHANGED_GUEST_MSR \
- | HWACCM_CHANGED_GUEST_DEBUG \
- | HWACCM_CHANGED_GUEST_FPU)
-
-/** @} */
-
-/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
-#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES 8
-
-/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
-#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
-/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
-#define HWACCM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
-/** Total guest mapped memory needed. */
-#define HWACCM_VTX_TOTAL_DEVHEAP_MEM (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)
-
-/** Enable for TPR guest patching. */
-#define VBOX_HWACCM_WITH_GUEST_PATCHING
-
-/** HWACCM SSM version
- */
-#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
-# define HWACCM_SSM_VERSION 5
-# define HWACCM_SSM_VERSION_NO_PATCHING 4
-#else
-# define HWACCM_SSM_VERSION 4
-# define HWACCM_SSM_VERSION_NO_PATCHING 4
-#endif
-#define HWACCM_SSM_VERSION_2_0_X 3
-
-/**
- * Global per-cpu information. (host)
- */
-typedef struct HMGLOBLCPUINFO
-{
- /** The CPU ID. */
- RTCPUID idCpu;
- /** The memory object */
- RTR0MEMOBJ hMemObj;
- /** Current ASID (AMD-V) / VPID (Intel). */
- uint32_t uCurrentASID;
- /** TLB flush count. */
- uint32_t cTLBFlushes;
- /** Whether to flush each new ASID/VPID before use. */
- bool fFlushASIDBeforeUse;
- /** Configured for VT-x or AMD-V. */
- bool fConfigured;
- /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
- bool fIgnoreAMDVInUseError;
- /** In use by our code. (for power suspend) */
- volatile bool fInUse;
-} HMGLOBLCPUINFO;
-/** Pointer to the per-cpu global information. */
-typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
-
-typedef enum
-{
- HWACCMPENDINGIO_INVALID = 0,
- HWACCMPENDINGIO_PORT_READ,
- HWACCMPENDINGIO_PORT_WRITE,
- HWACCMPENDINGIO_STRING_READ,
- HWACCMPENDINGIO_STRING_WRITE,
- /** The usual 32-bit paranoia. */
- HWACCMPENDINGIO_32BIT_HACK = 0x7fffffff
-} HWACCMPENDINGIO;
-
-
-typedef enum
-{
- HWACCMTPRINSTR_INVALID,
- HWACCMTPRINSTR_READ,
- HWACCMTPRINSTR_READ_SHR4,
- HWACCMTPRINSTR_WRITE_REG,
- HWACCMTPRINSTR_WRITE_IMM,
- HWACCMTPRINSTR_JUMP_REPLACEMENT,
- /** The usual 32-bit paranoia. */
- HWACCMTPRINSTR_32BIT_HACK = 0x7fffffff
-} HWACCMTPRINSTR;
-
-typedef struct
-{
- /** The key is the address of patched instruction. (32 bits GC ptr) */
- AVLOU32NODECORE Core;
- /** Original opcode. */
- uint8_t aOpcode[16];
- /** Instruction size. */
- uint32_t cbOp;
- /** Replacement opcode. */
- uint8_t aNewOpcode[16];
- /** Replacement instruction size. */
- uint32_t cbNewOp;
- /** Instruction type. */
- HWACCMTPRINSTR enmType;
- /** Source operand. */
- uint32_t uSrcOperand;
- /** Destination operand. */
- uint32_t uDstOperand;
- /** Number of times the instruction caused a fault. */
- uint32_t cFaults;
- /** Patch address of the jump replacement. */
- RTGCPTR32 pJumpTarget;
-} HWACCMTPRPATCH;
-/** Pointer to HWACCMTPRPATCH. */
-typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
-
-/**
- * Switcher function, HC to RC.
- *
- * @param pVM Pointer to the VM.
- * @param uOffsetVMCPU VMCPU offset from pVM
- * @returns Return code indicating the action to take.
- */
-typedef DECLCALLBACK (int) FNHWACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
-/** Pointer to switcher function. */
-typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
-
-/**
- * HWACCM VM Instance data.
- * Changes to this must checked against the padding of the hwaccm union in VM!
- */
-typedef struct HWACCM
-{
- /** Set when we've initialized VMX or SVM. */
- bool fInitialized;
-
- /** Set when hardware acceleration is allowed. */
- bool fAllowed;
-
- /** Set if nested paging is enabled. */
- bool fNestedPaging;
-
- /** Set if nested paging is allowed. */
- bool fAllowNestedPaging;
-
- /** Set if large pages are enabled (requires nested paging). */
- bool fLargePages;
-
- /** Set if we can support 64-bit guests or not. */
- bool fAllow64BitGuests;
-
- /** Set if an IO-APIC is configured for this VM. */
- bool fHasIoApic;
-
- /** Set when TPR patching is allowed. */
- bool fTRPPatchingAllowed;
-
- /** Set when we initialize VT-x or AMD-V once for all CPUs. */
- bool fGlobalInit;
-
- /** Set when TPR patching is active. */
- bool fTPRPatchingActive;
- bool u8Alignment[6];
-
- /** And mask for copying register contents. */
- uint64_t u64RegisterMask;
-
- /** Maximum ASID allowed. */
- uint32_t uMaxASID;
-
- /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
- * This number is set much higher when RTThreadPreemptIsPending is reliable. */
- uint32_t cMaxResumeLoops;
-
- /** Guest allocated memory for patching purposes. */
- RTGCPTR pGuestPatchMem;
- /** Current free pointer inside the patch block. */
- RTGCPTR pFreeGuestPatchMem;
- /** Size of the guest patch memory block. */
- uint32_t cbGuestPatchMem;
- uint32_t uPadding1;
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- /** 32 to 64 bits switcher entrypoint. */
- R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;
-
- /* AMD-V 64 bits vmrun handler */
- RTRCPTR pfnSVMGCVMRun64;
-
- /* VT-x 64 bits vmlaunch handler */
- RTRCPTR pfnVMXGCStartVM64;
-
- /* RC handler to setup the 64 bits FPU state. */
- RTRCPTR pfnSaveGuestFPU64;
-
- /* RC handler to setup the 64 bits debug state. */
- RTRCPTR pfnSaveGuestDebug64;
-
- /* Test handler */
- RTRCPTR pfnTest64;
-
- RTRCPTR uAlignment[2];
-/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- uint32_t u32Alignment[1]; */
-#endif
-
- struct
- {
- /** Set by the ring-0 side of HWACCM to indicate VMX is supported by the
- * CPU. */
- bool fSupported;
-
- /** Set when we've enabled VMX. */
- bool fEnabled;
-
- /** Set if VPID is supported. */
- bool fVPID;
-
- /** Set if VT-x VPID is allowed. */
- bool fAllowVPID;
-
- /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
- bool fUnrestrictedGuest;
-
- /** Whether we're using the preemption timer or not. */
- bool fUsePreemptTimer;
- /** The shift mask employed by the VMX-Preemption timer. */
- uint8_t cPreemptTimerShift;
-
- bool uAlignment[1];
-
- /** Virtual address of the TSS page used for real mode emulation. */
- R3PTRTYPE(PVBOXTSS) pRealModeTSS;
-
- /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
- R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
-
- /** R0 memory object for the APIC physical page (serves for filtering accesses). */
- RTR0MEMOBJ pMemObjAPIC;
- /** Physical address of the APIC physical page (serves for filtering accesses). */
- RTHCPHYS pAPICPhys;
- /** Virtual address of the APIC physical page (serves for filtering accesses). */
- R0PTRTYPE(uint8_t *) pAPIC;
-
- /** R0 memory object for the MSR entry load page (guest MSRs). */
- RTR0MEMOBJ pMemObjMSREntryLoad;
- /** Physical address of the MSR entry load page (guest MSRs). */
- RTHCPHYS pMSREntryLoadPhys;
- /** Virtual address of the MSR entry load page (guest MSRs). */
- R0PTRTYPE(uint8_t *) pMSREntryLoad;
-
-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- RTR0MEMOBJ pMemObjScratch;
- RTHCPHYS pScratchPhys;
- R0PTRTYPE(uint8_t *) pScratch;
-#endif
- /** R0 memory object for the MSR exit store page (guest MSRs). */
- RTR0MEMOBJ pMemObjMSRExitStore;
- /** Physical address of the MSR exit store page (guest MSRs). */
- RTHCPHYS pMSRExitStorePhys;
- /** Virtual address of the MSR exit store page (guest MSRs). */
- R0PTRTYPE(uint8_t *) pMSRExitStore;
-
- /** R0 memory object for the MSR exit load page (host MSRs). */
- RTR0MEMOBJ pMemObjMSRExitLoad;
- /** Physical address of the MSR exit load page (host MSRs). */
- RTHCPHYS pMSRExitLoadPhys;
- /** Virtual address of the MSR exit load page (host MSRs). */
- R0PTRTYPE(uint8_t *) pMSRExitLoad;
-
- /** Ring 0 handlers for VT-x. */
- DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
- uint32_t u32Alignment;
-#endif
- /** Host CR4 value (set by ring-0 VMX init) */
- uint64_t hostCR4;
-
- /** Host EFER value (set by ring-0 VMX init) */
- uint64_t hostEFER;
-
- /** VMX MSR values */
- struct
- {
- uint64_t feature_ctrl;
- uint64_t vmx_basic_info;
- VMX_CAPABILITY vmx_pin_ctls;
- VMX_CAPABILITY vmx_proc_ctls;
- VMX_CAPABILITY vmx_proc_ctls2;
- VMX_CAPABILITY vmx_exit;
- VMX_CAPABILITY vmx_entry;
- uint64_t vmx_misc;
- uint64_t vmx_cr0_fixed0;
- uint64_t vmx_cr0_fixed1;
- uint64_t vmx_cr4_fixed0;
- uint64_t vmx_cr4_fixed1;
- uint64_t vmx_vmcs_enum;
- uint64_t vmx_eptcaps;
- } msr;
-
- /** Flush types for invept & invvpid; they depend on capabilities. */
- VMX_FLUSH_EPT enmFlushEPT;
- VMX_FLUSH_VPID enmFlushVPID;
- } vmx;
-
- struct
- {
- /** Set by the ring-0 side of HWACCM to indicate SVM is supported by the
- * CPU. */
- bool fSupported;
- /** Set when we've enabled SVM. */
- bool fEnabled;
- /** Set if erratum 170 affects the AMD cpu. */
- bool fAlwaysFlushTLB;
- /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
- bool fIgnoreInUseError;
-
- /** R0 memory object for the IO bitmap (12kb). */
- RTR0MEMOBJ pMemObjIOBitmap;
- /** Physical address of the IO bitmap (12kb). */
- RTHCPHYS pIOBitmapPhys;
- /** Virtual address of the IO bitmap. */
- R0PTRTYPE(void *) pIOBitmap;
-
- /* HWCR msr (for diagnostics) */
- uint64_t msrHWCR;
-
- /** SVM revision. */
- uint32_t u32Rev;
-
- /** SVM feature bits from cpuid 0x8000000a */
- uint32_t u32Features;
- } svm;
-
- /**
- * AVL tree with all patches (active or disabled) sorted by guest instruction address
- */
- AVLOU32TREE PatchTree;
- uint32_t cPatches;
- HWACCMTPRPATCH aPatches[64];
-
- struct
- {
- uint32_t u32AMDFeatureECX;
- uint32_t u32AMDFeatureEDX;
- } cpuid;
-
- /** Saved error from detection */
- int32_t lLastError;
-
- /** HWACCMR0Init was run */
- bool fHWACCMR0Init;
- bool u8Alignment1[7];
-
- STAMCOUNTER StatTPRPatchSuccess;
- STAMCOUNTER StatTPRPatchFailure;
- STAMCOUNTER StatTPRReplaceSuccess;
- STAMCOUNTER StatTPRReplaceFailure;
-} HWACCM;
-/** Pointer to HWACCM VM instance data. */
-typedef HWACCM *PHWACCM;
-
-/* Maximum number of cached entries. */
-#define VMCSCACHE_MAX_ENTRY 128
-
-/* Structure for storing read and write VMCS actions. */
-typedef struct VMCSCACHE
-{
-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- /* Magic marker for searching in crash dumps. */
- uint8_t aMagic[16];
- uint64_t uMagic;
- uint64_t u64TimeEntry;
- uint64_t u64TimeSwitch;
- uint64_t cResume;
- uint64_t interPD;
- uint64_t pSwitcher;
- uint32_t uPos;
- uint32_t idCpu;
-#endif
- /* CR2 is saved here for EPT syncing. */
- uint64_t cr2;
- struct
- {
- uint32_t cValidEntries;
- uint32_t uAlignment;
- uint32_t aField[VMCSCACHE_MAX_ENTRY];
- uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
- } Write;
- struct
- {
- uint32_t cValidEntries;
- uint32_t uAlignment;
- uint32_t aField[VMCSCACHE_MAX_ENTRY];
- uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
- } Read;
-#ifdef DEBUG
- struct
- {
- RTHCPHYS HCPhysCpuPage;
- RTHCPHYS HCPhysVMCS;
- RTGCPTR pCache;
- RTGCPTR pCtx;
- } TestIn;
- struct
- {
- RTHCPHYS HCPhysVMCS;
- RTGCPTR pCache;
- RTGCPTR pCtx;
- uint64_t eflags;
- uint64_t cr8;
- } TestOut;
- struct
- {
- uint64_t param1;
- uint64_t param2;
- uint64_t param3;
- uint64_t param4;
- } ScratchPad;
-#endif
-} VMCSCACHE;
-/** Pointer to VMCSCACHE. */
-typedef VMCSCACHE *PVMCSCACHE;
-
-/** VMX StartVM function. */
-typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
-/** Pointer to a VMX StartVM function. */
-typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;
-
-/** SVM VMRun function. */
-typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
-/** Pointer to a SVM VMRun function. */
-typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
-
-/**
- * HWACCM VMCPU Instance data.
- */
-typedef struct HWACCMCPU
-{
- /** Old style FPU reporting trap mask override performed (optimization) */
- bool fFPUOldStyleOverride;
-
- /** Set if we don't have to flush the TLB on VM entry. */
- bool fResumeVM;
-
- /** Set if we need to flush the TLB during the world switch. */
- bool fForceTLBFlush;
-
- /** Set when we're using VT-x or AMD-V at that moment. */
- bool fActive;
-
- /** Set when the TLB has been checked until we return from the world switch. */
- volatile bool fCheckedTLBFlush;
- uint8_t bAlignment[3];
-
- /** World switch exit counter. */
- volatile uint32_t cWorldSwitchExits;
-
- /** HWACCM_CHANGED_* flags. */
- uint32_t fContextUseFlags;
-
- /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
- RTCPUID idLastCpu;
-
- /** TLB flush count */
- uint32_t cTLBFlushes;
-
- /** Current ASID in use by the VM */
- uint32_t uCurrentASID;
-
- uint32_t u32Alignment;
-
- /* Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
- uint64_t u64HostTSCAux;
-
- struct
- {
- /** Physical address of the VM control structure (VMCS). */
- RTHCPHYS HCPhysVMCS;
- /** R0 memory object for the VM control structure (VMCS). */
- RTR0MEMOBJ hMemObjVMCS;
- /** Virtual address of the VM control structure (VMCS). */
- R0PTRTYPE(void *) pvVMCS;
-
- /** Ring 0 handlers for VT-x. */
- PFNHWACCMVMXSTARTVM pfnStartVM;
-
-#if HC_ARCH_BITS == 32
- uint32_t u32Alignment;
-#endif
-
- /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
- uint64_t proc_ctls;
-
- /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
- uint64_t proc_ctls2;
-
- /** Physical address of the virtual APIC page for TPR caching. */
- RTHCPHYS HCPhysVAPIC;
- /** R0 memory object for the virtual APIC page for TPR caching. */
- RTR0MEMOBJ hMemObjVAPIC;
- /** Virtual address of the virtual APIC page for TPR caching. */
- R0PTRTYPE(uint8_t *) pbVAPIC;
-
- /** Current CR0 mask. */
- uint64_t cr0_mask;
- /** Current CR4 mask. */
- uint64_t cr4_mask;
-
- /** Current EPTP. */
- RTHCPHYS GCPhysEPTP;
-
- /** Physical address of the MSR bitmap (1 page). */
- RTHCPHYS pMSRBitmapPhys;
- /** R0 memory object for the MSR bitmap (1 page). */
- RTR0MEMOBJ pMemObjMSRBitmap;
- /** Virtual address of the MSR bitmap (1 page). */
- R0PTRTYPE(uint8_t *) pMSRBitmap;
-
-#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
- /** Physical address of the guest MSR load area (1 page). */
- RTHCPHYS pGuestMSRPhys;
- /** R0 memory object for the guest MSR load area (1 page). */
- RTR0MEMOBJ pMemObjGuestMSR;
- /** Virtual address of the guest MSR load area (1 page). */
- R0PTRTYPE(uint8_t *) pGuestMSR;
-
- /** Physical address of the MSR load area (1 page). */
- RTHCPHYS pHostMSRPhys;
- /** R0 memory object for the MSR load area (1 page). */
- RTR0MEMOBJ pMemObjHostMSR;
- /** Virtual address of the MSR load area (1 page). */
- R0PTRTYPE(uint8_t *) pHostMSR;
-
- /* Number of automatically loaded/restored guest MSRs during the world switch. */
- uint32_t cCachedMSRs;
- uint32_t uAlignment;
-#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
-
- /* Last use TSC offset value. (cached) */
- uint64_t u64TSCOffset;
-
- /** VMCS cache. */
- VMCSCACHE VMCSCache;
-
- /** Real-mode emulation state. */
- struct
- {
- X86EFLAGS eflags;
- uint32_t fValid;
- } RealMode;
-
- struct
- {
- uint64_t u64VMCSPhys;
- uint32_t ulVMCSRevision;
- uint32_t ulInstrError;
- uint32_t ulExitReason;
- RTCPUID idEnteredCpu;
- RTCPUID idCurrentCpu;
- uint32_t padding;
- } lasterror;
-
- /** The last seen guest paging mode (by VT-x). */
- PGMMODE enmLastSeenGuestMode;
- /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
- PGMMODE enmCurrGuestMode;
- /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
- PGMMODE enmPrevGuestMode;
- } vmx;
-
- struct
- {
- /** R0 memory object for the host VM control block (VMCB). */
- RTR0MEMOBJ pMemObjVMCBHost;
- /** Physical address of the host VM control block (VMCB). */
- RTHCPHYS pVMCBHostPhys;
- /** Virtual address of the host VM control block (VMCB). */
- R0PTRTYPE(void *) pVMCBHost;
-
- /** R0 memory object for the VM control block (VMCB). */
- RTR0MEMOBJ pMemObjVMCB;
- /** Physical address of the VM control block (VMCB). */
- RTHCPHYS pVMCBPhys;
- /** Virtual address of the VM control block (VMCB). */
- R0PTRTYPE(void *) pVMCB;
-
- /** Ring 0 handlers for VT-x. */
- PFNHWACCMSVMVMRUN pfnVMRun;
-
- /** R0 memory object for the MSR bitmap (8kb). */
- RTR0MEMOBJ pMemObjMSRBitmap;
- /** Physical address of the MSR bitmap (8kb). */
- RTHCPHYS pMSRBitmapPhys;
- /** Virtual address of the MSR bitmap. */
- R0PTRTYPE(void *) pMSRBitmap;
- } svm;
-
- /** Event injection state. */
- struct
- {
- uint32_t fPending;
- uint32_t errCode;
- uint64_t intInfo;
- } Event;
-
- /** IO Block emulation state. */
- struct
- {
- bool fEnabled;
- uint8_t u8Align[7];
-
- /** RIP at the start of the io code we wish to emulate in the recompiler. */
- RTGCPTR GCPtrFunctionEip;
-
- uint64_t cr0;
- } EmulateIoBlock;
-
- struct
- {
- /* Pending IO operation type. */
- HWACCMPENDINGIO enmType;
- uint32_t uPadding;
- RTGCPTR GCPtrRip;
- RTGCPTR GCPtrRipNext;
- union
- {
- struct
- {
- unsigned uPort;
- unsigned uAndVal;
- unsigned cbSize;
- } Port;
- uint64_t aRaw[2];
- } s;
- } PendingIO;
-
- /** Currently shadow paging mode. */
- PGMMODE enmShadowMode;
-
- /** The CPU ID of the CPU currently owning the VMCS. Set in
- * HWACCMR0Enter and cleared in HWACCMR0Leave. */
- RTCPUID idEnteredCpu;
-
- /** To keep track of pending TLB shootdown pages. (SMP guest only) */
- struct
- {
- RTGCPTR aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
- unsigned cPages;
- } TlbShootdown;
-
- /** For saving stack space, the disassembler state is allocated here instead of
- * on the stack. */
- DISCPUSTATE DisState;
-
- uint32_t padding2[1];
-
- STAMPROFILEADV StatEntry;
- STAMPROFILEADV StatExit1;
- STAMPROFILEADV StatExit2;
-#if 1 /* temporary for tracking down darwin issues. */
- STAMPROFILEADV StatExit2Sub1;
- STAMPROFILEADV StatExit2Sub2;
- STAMPROFILEADV StatExit2Sub3;
-#endif
- STAMPROFILEADV StatInGC;
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- STAMPROFILEADV StatWorldSwitch3264;
-#endif
- STAMPROFILEADV StatPoke;
- STAMPROFILEADV StatSpinPoke;
- STAMPROFILEADV StatSpinPokeFailed;
-
- STAMCOUNTER StatIntInject;
-
- STAMCOUNTER StatExitShadowNM;
- STAMCOUNTER StatExitGuestNM;
- STAMCOUNTER StatExitShadowPF;
- STAMCOUNTER StatExitShadowPFEM;
- STAMCOUNTER StatExitGuestPF;
- STAMCOUNTER StatExitGuestUD;
- STAMCOUNTER StatExitGuestSS;
- STAMCOUNTER StatExitGuestNP;
- STAMCOUNTER StatExitGuestGP;
- STAMCOUNTER StatExitGuestDE;
- STAMCOUNTER StatExitGuestDB;
- STAMCOUNTER StatExitGuestMF;
- STAMCOUNTER StatExitGuestBP;
- STAMCOUNTER StatExitGuestXF;
- STAMCOUNTER StatExitGuestXcpUnk;
- STAMCOUNTER StatExitInvlpg;
- STAMCOUNTER StatExitInvd;
- STAMCOUNTER StatExitCpuid;
- STAMCOUNTER StatExitRdtsc;
- STAMCOUNTER StatExitRdtscp;
- STAMCOUNTER StatExitRdpmc;
- STAMCOUNTER StatExitCli;
- STAMCOUNTER StatExitSti;
- STAMCOUNTER StatExitPushf;
- STAMCOUNTER StatExitPopf;
- STAMCOUNTER StatExitIret;
- STAMCOUNTER StatExitInt;
- STAMCOUNTER StatExitCRxWrite[16];
- STAMCOUNTER StatExitCRxRead[16];
- STAMCOUNTER StatExitDRxWrite;
- STAMCOUNTER StatExitDRxRead;
- STAMCOUNTER StatExitRdmsr;
- STAMCOUNTER StatExitWrmsr;
- STAMCOUNTER StatExitCLTS;
- STAMCOUNTER StatExitHlt;
- STAMCOUNTER StatExitMwait;
- STAMCOUNTER StatExitMonitor;
- STAMCOUNTER StatExitLMSW;
- STAMCOUNTER StatExitIOWrite;
- STAMCOUNTER StatExitIORead;
- STAMCOUNTER StatExitIOStringWrite;
- STAMCOUNTER StatExitIOStringRead;
- STAMCOUNTER StatExitIrqWindow;
- STAMCOUNTER StatExitMaxResume;
- STAMCOUNTER StatExitPreemptPending;
- STAMCOUNTER StatExitMTF;
- STAMCOUNTER StatIntReinject;
- STAMCOUNTER StatPendingHostIrq;
-
- STAMCOUNTER StatFlushPage;
- STAMCOUNTER StatFlushPageManual;
- STAMCOUNTER StatFlushPhysPageManual;
- STAMCOUNTER StatFlushTLB;
- STAMCOUNTER StatFlushTLBManual;
- STAMCOUNTER StatFlushPageInvlpg;
- STAMCOUNTER StatFlushTLBWorldSwitch;
- STAMCOUNTER StatNoFlushTLBWorldSwitch;
- STAMCOUNTER StatFlushTLBCRxChange;
- STAMCOUNTER StatFlushASID;
- STAMCOUNTER StatFlushTLBInvlpga;
- STAMCOUNTER StatTlbShootdown;
- STAMCOUNTER StatTlbShootdownFlush;
-
- STAMCOUNTER StatSwitchGuestIrq;
- STAMCOUNTER StatSwitchToR3;
-
- STAMCOUNTER StatTSCOffset;
- STAMCOUNTER StatTSCIntercept;
- STAMCOUNTER StatTSCInterceptOverFlow;
-
- STAMCOUNTER StatExitReasonNPF;
- STAMCOUNTER StatDRxArmed;
- STAMCOUNTER StatDRxContextSwitch;
- STAMCOUNTER StatDRxIOCheck;
-
- STAMCOUNTER StatLoadMinimal;
- STAMCOUNTER StatLoadFull;
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- STAMCOUNTER StatFpu64SwitchBack;
- STAMCOUNTER StatDebug64SwitchBack;
-#endif
-
-#ifdef VBOX_WITH_STATISTICS
- R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
- R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
- R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
- R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
-#endif
-} HWACCMCPU;
-/** Pointer to HWACCM VM instance data. */
-typedef HWACCMCPU *PHWACCMCPU;
-
-
-#ifdef IN_RING0
-
-VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
-VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
-
-
-#ifdef VBOX_STRICT
-VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
-#else
-# define HWACCMDumpRegs(a, b ,c) do { } while (0)
-# define HWACCMR0DumpDescriptor(a, b, c) do { } while (0)
-#endif
-
-# ifdef VBOX_WITH_KERNEL_USING_XMM
-DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
-DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
-# endif
-
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-/**
- * Gets 64-bit GDTR and IDTR on darwin.
- * @param pGdtr Where to store the 64-bit GDTR.
- * @param pIdtr Where to store the 64-bit IDTR.
- */
-DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
-
-/**
- * Gets 64-bit CR3 on darwin.
- * @returns CR3
- */
-DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
-# endif
-
-#endif /* IN_RING0 */
-
-/** @} */
-
-RT_C_DECLS_END
-
-#endif
-
diff --git a/src/VBox/VMM/include/IEMInternal.h b/src/VBox/VMM/include/IEMInternal.h
index 45803eaf..7b1b25a3 100644
--- a/src/VBox/VMM/include/IEMInternal.h
+++ b/src/VBox/VMM/include/IEMInternal.h
@@ -18,8 +18,9 @@
#ifndef ___IEMInternal_h
#define ___IEMInternal_h
-#include <VBox/vmm/stam.h>
#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/stam.h>
#include <VBox/param.h>
@@ -51,17 +52,6 @@ typedef RTFLOAT32U const *PCRTFLOAT32U;
/**
- * Operand or addressing mode.
- */
-typedef enum IEMMODE
-{
- IEMMODE_16BIT = 0,
- IEMMODE_32BIT,
- IEMMODE_64BIT
-} IEMMODE;
-AssertCompileSize(IEMMODE, 4);
-
-/**
* Extended operand mode that includes a representation of 8-bit.
*
* This is used for packing down modes when invoking some C instruction
@@ -213,8 +203,10 @@ typedef struct IEMCPU
/** Whether to bypass access handlers or not. */
bool fBypassHandlers;
+ /** Indicates that we're interpreting patch code - RC only! */
+ bool fInPatchCode;
/** Explicit alignment padding. */
- bool afAlignment0[3];
+ bool afAlignment0[2];
/** The flags of the current exception / interrupt. */
uint32_t fCurXcpt;
@@ -268,10 +260,12 @@ typedef struct IEMCPU
/** Indicates that a MOVS instruction with overlapping source and destination
* was executed, causing the memory write records to be incorrrect. */
bool fOverlappingMovs;
+ /** Set if there are problematic memory accesses (MMIO, write monitored, ++). */
+ bool fProblematicMemory;
/** This is used to communicate a CPL changed caused by IEMInjectTrap that
* CPUM doesn't yet reflect. */
uint8_t uInjectCpl;
- bool afAlignment2[4];
+ bool afAlignment2[3];
/** Mask of undefined eflags.
* The verifier will any difference in these flags. */
uint32_t fUndefinedEFlags;
@@ -376,6 +370,28 @@ typedef struct IEMCPU
uint8_t ab[512];
} aBounceBuffers[3];
+ /** @name Target CPU information.
+ * @{ */
+ /** EDX value of CPUID(1).
+ * @remarks Some bits are subject to change and must be queried dynamically. */
+ uint32_t fCpuIdStdFeaturesEdx;
+ /** ECX value of CPUID(1).
+ * @remarks Some bits are subject to change and must be queried dynamically. */
+ uint32_t fCpuIdStdFeaturesEcx;
+ /** The CPU vendor. */
+ CPUMCPUVENDOR enmCpuVendor;
+ /** @} */
+
+ /** @name Host CPU information.
+ * @{ */
+ /** EDX value of CPUID(1). */
+ uint32_t fHostCpuIdStdFeaturesEdx;
+ /** ECX value of CPUID(1). */
+ uint32_t fHostCpuIdStdFeaturesEcx;
+ /** The CPU vendor. */
+ CPUMCPUVENDOR enmHostCpuVendor;
+ /** @} */
+
#ifdef IEM_VERIFICATION_MODE_FULL
/** The event verification records for what IEM did (LIFO). */
R3PTRTYPE(PIEMVERIFYEVTREC) pIemEvtRecHead;
@@ -391,6 +407,8 @@ typedef struct IEMCPU
} IEMCPU;
/** Pointer to the per-CPU IEM state. */
typedef IEMCPU *PIEMCPU;
+/** Pointer to the const per-CPU IEM state. */
+typedef IEMCPU const *PCIEMCPU;
/** Converts a IEMCPU pointer to a VMCPU pointer.
* @returns VMCPU pointer.
@@ -463,6 +481,44 @@ typedef IEMCPU *PIEMCPU;
#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
+/** Mask with all the REX prefix flags.
+ * This is generally for use when needing to undo the REX prefixes when they
+ * are followed legacy prefixes and therefore does not immediately preceed
+ * the first opcode byte.
+ * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
+#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
+/** @} */
+
+/** @name Opcode forms
+ * @{ */
+/** ModR/M: reg, r/m */
+#define IEMOPFORM_RM 0
+/** ModR/M: reg, r/m (register) */
+#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
+/** ModR/M: reg, r/m (memory) */
+#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
+/** ModR/M: r/m, reg */
+#define IEMOPFORM_MR 1
+/** ModR/M: r/m (register), reg */
+#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
+/** ModR/M: r/m (memory), reg */
+#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
+/** ModR/M: r/m only */
+#define IEMOPFORM_M 2
+/** ModR/M: r/m only (register). */
+#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
+/** ModR/M: r/m only (memory). */
+#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
+/** ModR/M: reg only */
+#define IEMOPFORM_R 3
+
+/** Fixed register instruction, no R/M. */
+#define IEMOPFORM_FIXED 4
+
+/** The r/m is a register. */
+#define IEMOPFORM_MOD3 RT_BIT_32(8)
+/** The r/m is a memory access. */
+#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
/** @} */
/**
@@ -482,7 +538,7 @@ typedef IEMCPU *PIEMCPU;
/**
* Tests if full verification mode is enabled.
*
- * This expands to @c false when IEM_VERIFICATION_MODE is not defined and
+ * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
* should therefore cause the compiler to eliminate the verification branch
* of an if statement. */
#ifdef IEM_VERIFICATION_MODE_FULL
@@ -491,6 +547,22 @@ typedef IEMCPU *PIEMCPU;
# define IEM_FULL_VERIFICATION_ENABLED(a_pIemCpu) (false)
#endif
+/**
+ * Tests if full verification mode is enabled again REM.
+ *
+ * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
+ * should therefore cause the compiler to eliminate the verification branch
+ * of an if statement. */
+#ifdef IEM_VERIFICATION_MODE_FULL
+# ifdef IEM_VERIFICATION_MODE_FULL_HM
+# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pIemCpu) (!(a_pIemCpu)->fNoRem && !HMIsEnabled(IEMCPU_TO_VM(a_pIemCpu)))
+# else
+# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pIemCpu) (!(a_pIemCpu)->fNoRem)
+# endif
+#else
+# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pIemCpu) (false)
+#endif
+
/** @def IEM_VERIFICATION_MODE
* Indicates that one of the verfication modes are enabled.
*/
@@ -677,6 +749,16 @@ IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U *pu128Dst, PRTUI
uint32_t *pEFlags));
/** @} */
+/** @name Memory ordering
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
+typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
+IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
+/** @} */
+
/** @name Double precision shifts
* @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
@@ -834,6 +916,11 @@ IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
/** @} */
+/** @name Misc.
+ * @{ */
+FNIEMAIMPLBINU16 iemAImpl_arpl;
+/** @} */
+
/** @name FPU operations taking a 32-bit float argument
* @{ */
@@ -1014,7 +1101,62 @@ IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i64,(PCX86FXSTATE pFpuState, uint16
int64_t *pi64Val, PCRTFLOAT80U pr80Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
int64_t *pi32Val, PCRTFLOAT80U pr80Val));
-/** @} */
+/** @} */
+
+
+/** Temporary type representing a 256-bit vector register. */
+typedef struct {uint64_t au64[4]; } IEMVMM256;
+/** Temporary type pointing to a 256-bit vector register. */
+typedef IEMVMM256 *PIEMVMM256;
+/** Temporary type pointing to a const 256-bit vector register. */
+typedef IEMVMM256 *PCIEMVMM256;
+
+
+/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
+typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint128_t const *pu128Src));
+typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
+FNIEMAIMPLMEDIAF2U64 iemAImpl_pxor_u64, iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
+FNIEMAIMPLMEDIAF2U128 iemAImpl_pxor_u128, iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint32_t const *pu32Src));
+typedef FNIEMAIMPLMEDIAF1L1U64 *PFNIEMAIMPLMEDIAF1L1U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint64_t const *pu64Src));
+typedef FNIEMAIMPLMEDIAF1L1U128 *PFNIEMAIMPLMEDIAF1L1U128;
+FNIEMAIMPLMEDIAF1L1U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
+FNIEMAIMPLMEDIAF1L1U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
+typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF1H1U64;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint128_t const *pu128Src));
+typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF1H1U128;
+FNIEMAIMPLMEDIAF1H1U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
+FNIEMAIMPLMEDIAF1H1U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUF,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst,
+ uint128_t const *pu128Src, uint8_t bEvil));
+typedef FNIEMAIMPLMEDIAPSHUF *PFNIEMAIMPLMEDIAPSHUF;
+FNIEMAIMPLMEDIAPSHUF iemAImpl_pshufhw, iemAImpl_pshuflw, iemAImpl_pshufd;
+IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src, uint8_t bEvil));
+/** @} */
+
+/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
+ * @{ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
+IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint128_t const *pu128Src));
+/** @} */
+
/** @name Function tables.
@@ -1094,6 +1236,43 @@ typedef struct IEMOPSHIFTDBLSIZES
typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
+/**
+ * Function table for media instruction taking two full sized media registers,
+ * optionally the 2nd being a memory reference (only modifying the first op.)
+ */
+typedef struct IEMOPMEDIAF2
+{
+ PFNIEMAIMPLMEDIAF2U64 pfnU64;
+ PFNIEMAIMPLMEDIAF2U128 pfnU128;
+} IEMOPMEDIAF2;
+/** Pointer to a media operation function table for full sized ops. */
+typedef IEMOPMEDIAF2 const *PCIEMOPMEDIAF2;
+
+/**
+ * Function table for media instruction taking taking one full and one lower
+ * half media register.
+ */
+typedef struct IEMOPMEDIAF1L1
+{
+ PFNIEMAIMPLMEDIAF1L1U64 pfnU64;
+ PFNIEMAIMPLMEDIAF1L1U128 pfnU128;
+} IEMOPMEDIAF1L1;
+/** Pointer to a media operation function table for lowhalf+lowhalf -> full. */
+typedef IEMOPMEDIAF1L1 const *PCIEMOPMEDIAF1L1;
+
+/**
+ * Function table for media instruction taking taking one full and one high half
+ * media register.
+ */
+typedef struct IEMOPMEDIAF1H1
+{
+ PFNIEMAIMPLMEDIAF1H1U64 pfnU64;
+ PFNIEMAIMPLMEDIAF1H1U128 pfnU128;
+} IEMOPMEDIAF1H1;
+/** Pointer to a media operation function table for hihalf+hihalf -> full. */
+typedef IEMOPMEDIAF1H1 const *PCIEMOPMEDIAF1H1;
+
+
/** @} */
diff --git a/src/VBox/VMM/include/IOMInline.h b/src/VBox/VMM/include/IOMInline.h
index 3804e9e1..7c7ac41e 100644
--- a/src/VBox/VMM/include/IOMInline.h
+++ b/src/VBox/VMM/include/IOMInline.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -34,7 +34,7 @@
*/
DECLINLINE(CTX_SUFF(PIOMIOPORTRANGE)) iomIOPortGetRange(PVM pVM, RTIOPORT Port)
{
- Assert(PDMCritSectIsOwner(&pVM->iom.s.CritSect));
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
return (CTX_SUFF(PIOMIOPORTRANGE))RTAvlroIOPortRangeGet(&pVM->iom.s.CTX_SUFF(pTrees)->CTX_SUFF(IOPortTree), Port);
}
@@ -50,7 +50,7 @@ DECLINLINE(CTX_SUFF(PIOMIOPORTRANGE)) iomIOPortGetRange(PVM pVM, RTIOPORT Port)
*/
DECLINLINE(PIOMIOPORTRANGER3) iomIOPortGetRangeR3(PVM pVM, RTIOPORT Port)
{
- Assert(PDMCritSectIsOwner(&pVM->iom.s.CritSect));
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
return (PIOMIOPORTRANGER3)RTAvlroIOPortRangeGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortTreeR3, Port);
}
@@ -62,15 +62,16 @@ DECLINLINE(PIOMIOPORTRANGER3) iomIOPortGetRangeR3(PVM pVM, RTIOPORT Port)
* @returns NULL if address not in a MMIO range.
*
* @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the virtual CPU structure of the caller.
* @param GCPhys Physical address to lookup.
*/
-DECLINLINE(PIOMMMIORANGE) iomMmioGetRange(PVM pVM, RTGCPHYS GCPhys)
+DECLINLINE(PIOMMMIORANGE) iomMmioGetRange(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
- Assert(PDMCritSectIsOwner(&pVM->iom.s.CritSect));
- PIOMMMIORANGE pRange = pVM->iom.s.CTX_SUFF(pMMIORangeLast);
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
+ PIOMMMIORANGE pRange = pVCpu->iom.s.CTX_SUFF(pMMIORangeLast);
if ( !pRange
|| GCPhys - pRange->GCPhys >= pRange->cb)
- pVM->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
+ pVCpu->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
= (PIOMMMIORANGE)RTAvlroGCPhysRangeGet(&pVM->iom.s.CTX_SUFF(pTrees)->MMIOTree, GCPhys);
return pRange;
}
@@ -97,22 +98,23 @@ DECLINLINE(void) iomMmioRetainRange(PIOMMMIORANGE pRange)
* @returns NULL if address not in a MMIO range.
*
* @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the virtual CPU structure of the caller.
* @param GCPhys Physical address to lookup.
*/
-DECLINLINE(PIOMMMIORANGE) iomMmioGetRangeWithRef(PVM pVM, RTGCPHYS GCPhys)
+DECLINLINE(PIOMMMIORANGE) iomMmioGetRangeWithRef(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
- int rc = PDMCritSectEnter(&pVM->iom.s.CritSect, VINF_SUCCESS);
+ int rc = IOM_LOCK_SHARED_EX(pVM, VINF_SUCCESS);
AssertRCReturn(rc, NULL);
- PIOMMMIORANGE pRange = pVM->iom.s.CTX_SUFF(pMMIORangeLast);
+ PIOMMMIORANGE pRange = pVCpu->iom.s.CTX_SUFF(pMMIORangeLast);
if ( !pRange
|| GCPhys - pRange->GCPhys >= pRange->cb)
- pVM->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
+ pVCpu->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
= (PIOMMMIORANGE)RTAvlroGCPhysRangeGet(&pVM->iom.s.CTX_SUFF(pTrees)->MMIOTree, GCPhys);
if (pRange)
iomMmioRetainRange(pRange);
- PDMCritSectLeave(&pVM->iom.s.CritSect);
+ IOM_UNLOCK_SHARED(pVM);
return pRange;
}
@@ -139,14 +141,15 @@ DECLINLINE(void) iomMmioReleaseRange(PVM pVM, PIOMMMIORANGE pRange)
* @returns NULL if address not in a MMIO range.
*
* @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the virtual CPU structure of the caller.
* @param GCPhys Physical address to lookup.
*/
-DECLINLINE(PIOMMMIORANGE) iomMMIOGetRangeUnsafe(PVM pVM, RTGCPHYS GCPhys)
+DECLINLINE(PIOMMMIORANGE) iomMMIOGetRangeUnsafe(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
- PIOMMMIORANGE pRange = pVM->iom.s.CTX_SUFF(pMMIORangeLast);
+ PIOMMMIORANGE pRange = pVCpu->iom.s.CTX_SUFF(pMMIORangeLast);
if ( !pRange
|| GCPhys - pRange->GCPhys >= pRange->cb)
- pVM->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
+ pVCpu->iom.s.CTX_SUFF(pMMIORangeLast) = pRange
= (PIOMMMIORANGE)RTAvlroGCPhysRangeGet(&pVM->iom.s.CTX_SUFF(pTrees)->MMIOTree, GCPhys);
return pRange;
}
@@ -164,29 +167,39 @@ DECLINLINE(PIOMMMIORANGE) iomMMIOGetRangeUnsafe(PVM pVM, RTGCPHYS GCPhys)
* @returns NULL if not found (R0/GC), or out of memory (R3).
*
* @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the virtual CPU structure of the caller.
* @param GCPhys Physical address to lookup.
* @param pRange The MMIO range.
+ *
+ * @remarks The caller holds the IOM critical section with shared access prior
+ * to calling this method. Upon return, the lock has been released!
+ * This is ugly, but it's a necessary evil since we cannot upgrade read
+ * locks to write locks and the whole purpose here is calling
+ * iomR3MMIOStatsCreate.
*/
-DECLINLINE(PIOMMMIOSTATS) iomMmioGetStats(PVM pVM, RTGCPHYS GCPhys, PIOMMMIORANGE pRange)
+DECLINLINE(PIOMMMIOSTATS) iomMmioGetStats(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PIOMMMIORANGE pRange)
{
- PDMCritSectEnter(&pVM->iom.s.CritSect, VINF_SUCCESS);
+ Assert(IOM_IS_SHARED_LOCK_OWNER(pVM));
/* For large ranges, we'll put everything on the first byte. */
if (pRange->cb > PAGE_SIZE)
GCPhys = pRange->GCPhys;
- PIOMMMIOSTATS pStats = pVM->iom.s.CTX_SUFF(pMMIOStatsLast);
+ PIOMMMIOSTATS pStats = pVCpu->iom.s.CTX_SUFF(pMMIOStatsLast);
if ( !pStats
|| pStats->Core.Key != GCPhys)
{
pStats = (PIOMMMIOSTATS)RTAvloGCPhysGet(&pVM->iom.s.CTX_SUFF(pTrees)->MmioStatTree, GCPhys);
# ifdef IN_RING3
if (!pStats)
- pStats = iomR3MMIOStatsCreate(pVM, GCPhys, pRange->pszDesc);
+ {
+ IOM_UNLOCK_SHARED(pVM);
+ return iomR3MMIOStatsCreate(pVM, GCPhys, pRange->pszDesc);
+ }
# endif
}
- PDMCritSectLeave(&pVM->iom.s.CritSect);
+ IOM_UNLOCK_SHARED(pVM);
return pStats;
}
#endif /* VBOX_WITH_STATISTICS */
diff --git a/src/VBox/VMM/include/IOMInternal.h b/src/VBox/VMM/include/IOMInternal.h
index 1d7a828c..1b80383f 100644
--- a/src/VBox/VMM/include/IOMInternal.h
+++ b/src/VBox/VMM/include/IOMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -18,12 +18,17 @@
#ifndef ___IOMInternal_h
#define ___IOMInternal_h
+#define IOM_WITH_CRIT_SECT_RW
+
#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmcritsect.h>
+#ifdef IOM_WITH_CRIT_SECT_RW
+# include <VBox/vmm/pdmcritsectrw.h>
+#endif
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
@@ -323,32 +328,11 @@ typedef struct IOM
#endif
/** Lock serializing EMT access to IOM. */
+#ifdef IOM_WITH_CRIT_SECT_RW
+ PDMCRITSECTRW CritSect;
+#else
PDMCRITSECT CritSect;
-
- /** @name Caching of I/O Port and MMIO ranges and statistics.
- * (Saves quite some time in rep outs/ins instruction emulation.)
- * @{ */
- R3PTRTYPE(PIOMIOPORTRANGER3) pRangeLastReadR3;
- R3PTRTYPE(PIOMIOPORTRANGER3) pRangeLastWriteR3;
- R3PTRTYPE(PIOMIOPORTSTATS) pStatsLastReadR3;
- R3PTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteR3;
- R3PTRTYPE(PIOMMMIORANGE) pMMIORangeLastR3;
- R3PTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastR3;
-
- R0PTRTYPE(PIOMIOPORTRANGER0) pRangeLastReadR0;
- R0PTRTYPE(PIOMIOPORTRANGER0) pRangeLastWriteR0;
- R0PTRTYPE(PIOMIOPORTSTATS) pStatsLastReadR0;
- R0PTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteR0;
- R0PTRTYPE(PIOMMMIORANGE) pMMIORangeLastR0;
- R0PTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastR0;
-
- RCPTRTYPE(PIOMIOPORTRANGERC) pRangeLastReadRC;
- RCPTRTYPE(PIOMIOPORTRANGERC) pRangeLastWriteRC;
- RCPTRTYPE(PIOMIOPORTSTATS) pStatsLastReadRC;
- RCPTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteRC;
- RCPTRTYPE(PIOMMMIORANGE) pMMIORangeLastRC;
- RCPTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastRC;
- /** @} */
+#endif
/** @name I/O Port statistics.
* @{ */
@@ -404,6 +388,31 @@ typedef struct IOMCPU
/** For saving stack space, the disassembler state is allocated here instead of
* on the stack. */
DISCPUSTATE DisState;
+
+ /** @name Caching of I/O Port and MMIO ranges and statistics.
+ * (Saves quite some time in rep outs/ins instruction emulation.)
+ * @{ */
+ R3PTRTYPE(PIOMIOPORTRANGER3) pRangeLastReadR3;
+ R3PTRTYPE(PIOMIOPORTRANGER3) pRangeLastWriteR3;
+ R3PTRTYPE(PIOMIOPORTSTATS) pStatsLastReadR3;
+ R3PTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteR3;
+ R3PTRTYPE(PIOMMMIORANGE) pMMIORangeLastR3;
+ R3PTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastR3;
+
+ R0PTRTYPE(PIOMIOPORTRANGER0) pRangeLastReadR0;
+ R0PTRTYPE(PIOMIOPORTRANGER0) pRangeLastWriteR0;
+ R0PTRTYPE(PIOMIOPORTSTATS) pStatsLastReadR0;
+ R0PTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteR0;
+ R0PTRTYPE(PIOMMMIORANGE) pMMIORangeLastR0;
+ R0PTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastR0;
+
+ RCPTRTYPE(PIOMIOPORTRANGERC) pRangeLastReadRC;
+ RCPTRTYPE(PIOMIOPORTRANGERC) pRangeLastWriteRC;
+ RCPTRTYPE(PIOMIOPORTSTATS) pStatsLastReadRC;
+ RCPTRTYPE(PIOMIOPORTSTATS) pStatsLastWriteRC;
+ RCPTRTYPE(PIOMMMIORANGE) pMMIORangeLastRC;
+ RCPTRTYPE(PIOMMMIOSTATS) pMMIOStatsLastRC;
+ /** @} */
} IOMCPU;
/** Pointer to IOM per virtual CPU instance data. */
typedef IOMCPU *PIOMCPU;
@@ -413,7 +422,6 @@ RT_C_DECLS_BEGIN
void iomMmioFreeRange(PVM pVM, PIOMMMIORANGE pRange);
#ifdef IN_RING3
-PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDesc);
PIOMMMIOSTATS iomR3MMIOStatsCreate(PVM pVM, RTGCPHYS GCPhys, const char *pszDesc);
#endif /* IN_RING3 */
@@ -425,13 +433,33 @@ DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, voi
#endif
/* IOM locking helpers. */
-#define IOM_LOCK(a_pVM) PDMCritSectEnter(&(a_pVM)->iom.s.CritSect, VERR_SEM_BUSY)
-#define IOM_UNLOCK(a_pVM) do { PDMCritSectLeave(&(a_pVM)->iom.s.CritSect); } while (0)
+#ifdef IOM_WITH_CRIT_SECT_RW
+# define IOM_LOCK_EXCL(a_pVM) PDMCritSectRwEnterExcl(&(a_pVM)->iom.s.CritSect, VERR_SEM_BUSY)
+# define IOM_UNLOCK_EXCL(a_pVM) do { PDMCritSectRwLeaveExcl(&(a_pVM)->iom.s.CritSect); } while (0)
+# if 0 /* (in case needed for debugging) */
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectRwEnterExcl(&(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectRwLeaveExcl(&(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectRwIsWriteOwner(&(a_pVM)->iom.s.CritSect)
+# else
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectRwEnterShared(&(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectRwLeaveShared(&(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectRwIsReadOwner(&(a_pVM)->iom.s.CritSect, true)
+# endif
+# define IOM_IS_EXCL_LOCK_OWNER(a_pVM) PDMCritSectRwIsWriteOwner(&(a_pVM)->iom.s.CritSect)
+#else
+# define IOM_LOCK_EXCL(a_pVM) PDMCritSectEnter(&(a_pVM)->iom.s.CritSect, VERR_SEM_BUSY)
+# define IOM_UNLOCK_EXCL(a_pVM) do { PDMCritSectLeave(&(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_LOCK_SHARED_EX(a_pVM, a_rcBusy) PDMCritSectEnter(&(a_pVM)->iom.s.CritSect, (a_rcBusy))
+# define IOM_UNLOCK_SHARED(a_pVM) do { PDMCritSectLeave(&(a_pVM)->iom.s.CritSect); } while (0)
+# define IOM_IS_SHARED_LOCK_OWNER(a_pVM) PDMCritSectIsOwner(&(a_pVM)->iom.s.CritSect)
+# define IOM_IS_EXCL_LOCK_OWNER(a_pVM) PDMCritSectIsOwner(&(a_pVM)->iom.s.CritSect)
+#endif
+#define IOM_LOCK_SHARED(a_pVM) IOM_LOCK_SHARED_EX(a_pVM, VERR_SEM_BUSY)
/* Disassembly helpers used in IOMAll.cpp & IOMAllMMIO.cpp */
bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize);
-bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u32Data);
+bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data);
RT_C_DECLS_END
diff --git a/src/VBox/VMM/include/MMInternal.h b/src/VBox/VMM/include/MMInternal.h
index c058a144..950debe7 100644
--- a/src/VBox/VMM/include/MMInternal.h
+++ b/src/VBox/VMM/include/MMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/PATMA.h b/src/VBox/VMM/include/PATMA.h
index f7d5435a..cfe44f21 100644
--- a/src/VBox/VMM/include/PATMA.h
+++ b/src/VBox/VMM/include/PATMA.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -145,6 +145,7 @@ extern PATCHASMRECORD PATMPopf32Record_NoExit;
extern PATCHASMRECORD PATMPushf32Record;
extern PATCHASMRECORD PATMPushf16Record;
extern PATCHASMRECORD PATMIretRecord;
+extern PATCHASMRECORD PATMIretRing1Record;
extern PATCHASMRECORD PATMCpuidRecord;
extern PATCHASMRECORD PATMLoopRecord;
extern PATCHASMRECORD PATMLoopZRecord;
diff --git a/src/VBox/VMM/include/PATMInternal.h b/src/VBox/VMM/include/PATMInternal.h
index 4c8a5fb3..837f9ed8 100644
--- a/src/VBox/VMM/include/PATMInternal.h
+++ b/src/VBox/VMM/include/PATMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -29,11 +29,16 @@
#include <VBox/log.h>
-
-#define PATM_SSM_VERSION 55
+/** @name Saved state version numbers.
+ * @{ */
+/** Uses normal structure serialization with markers and everything. */
+#define PATM_SSM_VERSION 56
+/** Last version which saves structures as raw memory. */
+#define PATM_SSM_VERSION_MEM 55
#define PATM_SSM_VERSION_FIXUP_HACK 54
#define PATM_SSM_VERSION_FIXUP_HACK 54
#define PATM_SSM_VERSION_VER16 53
+/** @} */
/* Enable for call patching. */
#define PATM_ENABLE_CALL
@@ -424,11 +429,13 @@ typedef struct PATM
/** Delta to the new relocated HMA area.
* Used only during PATMR3Relocate(). */
int32_t deltaReloc;
- /* GC PATM state pointer - HC pointer. */
+ /** GC PATM state pointer - HC pointer. */
R3PTRTYPE(PPATMGCSTATE) pGCStateHC;
- /* GC PATM state pointer - GC pointer. */
+ /** GC PATM state pointer - RC pointer. */
RCPTRTYPE(PPATMGCSTATE) pGCStateGC;
- /** PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
+ /** PATM stack page for call instruction execution.
+ * 2 parts: one for our private stack and one to store the original return
+ * address. */
RCPTRTYPE(RTRCPTR *) pGCStackGC;
/** HC pointer of the PATM stack page. */
R3PTRTYPE(RTRCPTR *) pGCStackHC;
@@ -485,6 +492,14 @@ typedef struct PATM
#endif
} savedstate;
+ /** Debug module for the patch memory. */
+ RTDBGMOD hDbgModPatchMem;
+
+#if HC_ARCH_BITS == 32
+ /** Align statistics on a 8 byte boundary. */
+ uint32_t u32Alignment1;
+#endif
+
STAMCOUNTER StatNrOpcodeRead;
STAMCOUNTER StatDisabled;
STAMCOUNTER StatUnusable;
@@ -553,59 +568,16 @@ DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM);
DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#ifdef IN_RING3
-RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC);
-RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC);
-RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC);
+RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC);
+RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC);
+RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC);
#endif
-/* Add a patch to guest lookup record
- *
- * @param pVM Pointer to the VM.
- * @param pPatch Patch structure ptr
- * @param pPatchInstrHC Guest context pointer to patch block
- * @param pInstrGC Guest context pointer to privileged instruction
- * @param enmType Lookup type
- * @param fDirty Dirty flag
- *
- */
-void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty=false);
-
-/**
- * Insert page records for all guest pages that contain instructions that were recompiled for this patch
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatch Patch record
- */
-int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch);
-
-/**
- * Remove page records for all guest pages that contain instructions that were recompiled for this patch
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatch Patch record
- */
-int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch);
-
-/**
- * Returns the GC address of the corresponding patch statistics counter
- *
- * @returns Stat address
- * @param pVM Pointer to the VM.
- * @param pPatch Patch structure
- */
-RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch);
-
-/**
- * Remove patch for privileged instruction at specified location
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatchRec Patch record
- * @param fForceRemove Remove *all* patches
- */
-int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove);
+void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC,
+ PATM_LOOKUP_TYPE enmType, bool fDirty = false);
+int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch);
+RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch);
+int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove);
/**
* Call for analysing the instructions following the privileged instr. for compliance with our heuristics
@@ -620,60 +592,14 @@ int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove);
*/
typedef int (VBOXCALL *PFN_PATMR3ANALYSE)(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec);
-/**
- * Install guest OS specific patch
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pCpu Disassembly state of instruction.
- * @param pInstrGC GC Instruction pointer for instruction
- * @param pInstrHC GC Instruction pointer for instruction
- * @param pPatchRec Patch structure
- *
- */
-int PATMInstallGuestSpecificPatch(PVM pVM, PDISCPUSTATE pCpu, RTRCPTR pInstrGC, uint8_t *pInstrHC, PPATMPATCHREC pPatchRec);
-
-
-/**
- * Check if the instruction is patched as a duplicated function
- *
- * @returns patch record
- * @param pVM Pointer to the VM.
- * @param pInstrGC Guest context point to the instruction
- *
- */
-VMMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC);
-
-
-/**
- * Empty the specified tree (PV tree, MMR3 heap)
- *
- * @param pVM Pointer to the VM.
- * @param ppTree Tree to empty
- */
-void patmEmptyTree(PVM pVM, PPAVLPVNODECORE ppTree);
-
-
-/**
- * Empty the specified tree (U32 tree, MMR3 heap)
- *
- * @param pVM Pointer to the VM.
- * @param ppTree Tree to empty
- */
-void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree);
-
-
-/**
- * Return the name of the patched instruction
- *
- * @returns instruction name
- *
- * @param opcode DIS instruction opcode
- * @param fPatchFlags Patch flags
- */
-VMMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags);
-
+int patmR3InstallGuestSpecificPatch(PVM pVM, PDISCPUSTATE pCpu, RTRCPTR pInstrGC, uint8_t *pInstrHC, PPATMPATCHREC pPatchRec);
+PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC);
+const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags);
+PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints = false);
+int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch);
+int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch);
+R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr);
RT_C_DECLS_BEGIN
/**
@@ -690,63 +616,7 @@ RT_C_DECLS_BEGIN
*/
VMMRCDECL(int) PATMGCMonitorPage(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
-/**
- * Find patch for privileged instruction at specified location
- *
- * @returns Patch structure pointer if found; else NULL
- * @param pVM Pointer to the VM.
- * @param pInstr Guest context point to instruction that might lie within 5 bytes of an existing patch jump
- * @param fIncludeHints Include hinted patches or not
- *
- */
-PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints=false);
-
-/**
- * Patch cli/sti pushf/popf instruction block at specified location
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pInstrGC Guest context point to privileged instruction
- * @param pInstrHC Host context point to privileged instruction
- * @param uOpcode Instruction opcodee
- * @param uOpSize Size of starting instruction
- * @param pPatchRec Patch record
- *
- * @note returns failure if patching is not allowed or possible
- *
- */
-VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
- uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec);
-
-
-/**
- * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pInstrGC Guest context point to privileged instruction
- * @param pInstrHC Host context point to privileged instruction
- * @param pCpu Disassembly CPU structure ptr
- * @param pPatch Patch record
- *
- * @note returns failure if patching is not allowed or possible
- *
- */
-VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch);
-
-/**
- * Mark patch as dirty
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatch Patch record
- *
- * @note returns failure if patching is not allowed or possible
- *
- */
-VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
-
-R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr);
+RT_C_DECLS_END
/**
* Calculate the branch destination
@@ -755,7 +625,7 @@ R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RC
* @param pCpu Disassembly state of instruction.
* @param pBranchInstrGC GC pointer of branch instruction
*/
-inline RTRCPTR PATMResolveBranch(PDISCPUSTATE pCpu, RTRCPTR pBranchInstrGC)
+DECLINLINE(RTRCPTR) PATMResolveBranch(PDISCPUSTATE pCpu, RTRCPTR pBranchInstrGC)
{
uint32_t disp;
if (pCpu->Param1.fUse & DISUSE_IMMEDIATE8_REL)
@@ -784,11 +654,15 @@ inline RTRCPTR PATMResolveBranch(PDISCPUSTATE pCpu, RTRCPTR pBranchInstrGC)
#endif
}
-RT_C_DECLS_END
-
#ifdef LOG_ENABLED
int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec);
int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Analyse, PPATMP2GLOOKUPREC pCacheRec);
#endif
+
+void patmR3DbgInit(PVM pVM);
+void patmR3DbgTerm(PVM pVM);
+void patmR3DbgReset(PVM pVM);
+void patmR3DbgAddPatch(PVM pVM, PPATMPATCHREC pPatchRec);
+
#endif
diff --git a/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h b/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h
index a9f6ee81..d9d7816f 100644
--- a/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h
+++ b/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -271,6 +271,12 @@ typedef struct PDMASYNCCOMPLETIONEPCLASSFILE
RTR3UINTPTR uBitmaskAlignment;
/** Flag whether the out of resources warning was printed already. */
bool fOutOfResourcesWarningPrinted;
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ /** Timer for delayed request completion. */
+ PTMTIMERR3 pTimer;
+ /** Milliseconds until the next delay expires. */
+ volatile uint64_t cMilliesNext;
+#endif
} PDMASYNCCOMPLETIONEPCLASSFILE;
/** Pointer to the endpoint class data. */
typedef PDMASYNCCOMPLETIONEPCLASSFILE *PPDMASYNCCOMPLETIONEPCLASSFILE;
@@ -379,6 +385,8 @@ typedef struct PDMASYNCCOMPLETIONENDPOINTFILE
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
/** Request delay. */
volatile uint32_t msDelay;
+ /** Request delay jitter. */
+ volatile uint32_t msJitter;
/** Number of requests to delay. */
volatile uint32_t cReqsDelay;
/** Task type to delay. */
@@ -419,6 +427,8 @@ typedef struct PDMASYNCCOMPLETIONENDPOINTFILE
* task writing to that range has to wait until the task completes.
*/
PAVLRFOFFTREE pTreeRangesLocked;
+ /** Number of requests with a range lock active. */
+ unsigned cLockedReqsActive;
/** Number of requests currently being processed for this endpoint
* (excluded flush requests). */
unsigned cRequestsActive;
diff --git a/src/VBox/VMM/include/PDMAsyncCompletionInternal.h b/src/VBox/VMM/include/PDMAsyncCompletionInternal.h
index 2180c128..30dbdd8a 100644
--- a/src/VBox/VMM/include/PDMAsyncCompletionInternal.h
+++ b/src/VBox/VMM/include/PDMAsyncCompletionInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -38,7 +38,7 @@ typedef struct PDMASYNCCOMPLETIONEPCLASSOPS
/** Version identifier. */
uint32_t u32Version;
/** Name of the endpoint class. */
- const char *pcszName;
+ const char *pszName;
/** Class type. */
PDMASYNCCOMPLETIONEPCLASSTYPE enmClassType;
/** Size of the global endpoint class data in bytes. */
@@ -183,6 +183,8 @@ typedef struct PDMASYNCCOMPLETIONEPCLASS
R3PTRTYPE(PCPDMASYNCCOMPLETIONEPCLASSOPS) pEndpointOps;
/** Task cache. */
RTMEMCACHE hMemCacheTasks;
+ /** Flag whether to gather advanced statistics about requests. */
+ bool fGatherAdvancedStatistics;
} PDMASYNCCOMPLETIONEPCLASS;
/** Pointer to the PDM async completion endpoint class data. */
typedef PDMASYNCCOMPLETIONEPCLASS *PPDMASYNCCOMPLETIONEPCLASS;
@@ -207,8 +209,28 @@ typedef struct PDMASYNCCOMPLETIONENDPOINT
char *pszUri;
/** Pointer to the assigned bandwidth manager. */
volatile PPDMACBWMGR pBwMgr;
-#ifdef VBOX_WITH_STATISTICS
+ /** Aligns following statistic counters on a 8 byte boundary. */
uint32_t u32Alignment;
+ /** @name Request size statistics.
+ * @{ */
+ STAMCOUNTER StatReqSizeSmaller512;
+ STAMCOUNTER StatReqSize512To1K;
+ STAMCOUNTER StatReqSize1KTo2K;
+ STAMCOUNTER StatReqSize2KTo4K;
+ STAMCOUNTER StatReqSize4KTo8K;
+ STAMCOUNTER StatReqSize8KTo16K;
+ STAMCOUNTER StatReqSize16KTo32K;
+ STAMCOUNTER StatReqSize32KTo64K;
+ STAMCOUNTER StatReqSize64KTo128K;
+ STAMCOUNTER StatReqSize128KTo256K;
+ STAMCOUNTER StatReqSize256KTo512K;
+ STAMCOUNTER StatReqSizeOver512K;
+ STAMCOUNTER StatReqsUnaligned512;
+ STAMCOUNTER StatReqsUnaligned4K;
+ STAMCOUNTER StatReqsUnaligned8K;
+ /** @} */
+ /** @name Request completion time statistics.
+ * @{ */
STAMCOUNTER StatTaskRunTimesNs[10];
STAMCOUNTER StatTaskRunTimesUs[10];
STAMCOUNTER StatTaskRunTimesMs[10];
@@ -219,11 +241,10 @@ typedef struct PDMASYNCCOMPLETIONENDPOINT
STAMCOUNTER StatIoOpsCompleted;
uint64_t tsIntervalStartMs;
uint64_t cIoOpsCompleted;
-#endif
+ /** @} */
} PDMASYNCCOMPLETIONENDPOINT;
-#ifdef VBOX_WITH_STATISTICS
+AssertCompileMemberAlignment(PDMASYNCCOMPLETIONENDPOINT, StatReqSizeSmaller512, sizeof(uint64_t));
AssertCompileMemberAlignment(PDMASYNCCOMPLETIONENDPOINT, StatTaskRunTimesNs, sizeof(uint64_t));
-#endif
/**
* A PDM async completion task handle.
@@ -245,28 +266,7 @@ typedef struct PDMASYNCCOMPLETIONTASK
uint64_t tsNsStart;
} PDMASYNCCOMPLETIONTASK;
-/**
- * Called by the endpoint if a task has finished.
- *
- * @returns nothing
- * @param pTask Pointer to the finished task.
- * @param rc Status code of the completed request.
- * @param fCallCompletionHandler Flag whether the completion handler should be called to
- * inform the owner of the task that it has completed.
- */
void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler);
-
-/**
- * Checks if the endpoint is allowed to transfer the given amount of bytes.
- *
- * @returns true if the endpoint is allowed to transfer the data.
- * false otherwise
- * @param pEndpoint The endpoint.
- * @param cbTransfer The number of bytes to transfer.
- * @param pmsWhenNext Where to store the number of milliseconds
- * until the bandwidth is refreshed.
- * Only set if false is returned.
- */
bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext);
RT_C_DECLS_END
diff --git a/src/VBox/VMM/include/PDMBlkCacheInternal.h b/src/VBox/VMM/include/PDMBlkCacheInternal.h
index b2be57b8..47af634f 100644
--- a/src/VBox/VMM/include/PDMBlkCacheInternal.h
+++ b/src/VBox/VMM/include/PDMBlkCacheInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/PDMInternal.h b/src/VBox/VMM/include/PDMInternal.h
index ba8c6445..e247e73a 100644
--- a/src/VBox/VMM/include/PDMInternal.h
+++ b/src/VBox/VMM/include/PDMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -25,10 +25,14 @@
#include <VBox/vusb.h>
#include <VBox/vmm/pdmasynccompletion.h>
#ifdef VBOX_WITH_NETSHAPER
-#include <VBox/vmm/pdmnetshaper.h>
-#endif /* VBOX_WITH_NETSHAPER */
+# include <VBox/vmm/pdmnetshaper.h>
+#endif
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+# include <VBox/vmm/pdmasynccompletion.h>
+#endif
#include <VBox/vmm/pdmblkcache.h>
#include <VBox/vmm/pdmcommon.h>
+#include <VBox/sup.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>
#ifdef IN_RING3
@@ -52,10 +56,19 @@ RT_C_DECLS_BEGIN
/** @def PDMCRITSECT_STRICT
* Enables/disables PDM critsect strictness like deadlock detection. */
-#if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(IEM_VERIFICATION_MODE)) || defined(DOXYGEN_RUNNING)
+#if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(IEM_VERIFICATION_MODE) && !defined(PDMCRITSECT_STRICT)) \
+ || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif
+/** @def PDMCRITSECT_STRICT
+ * Enables/disables PDM read/write critsect strictness like deadlock
+ * detection. */
+#if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(IEM_VERIFICATION_MODE) && !defined(PDMCRITSECTRW_STRICT)) \
+ || defined(DOXYGEN_RUNNING)
+# define PDMCRITSECTRW_STRICT
+#endif
+
/*******************************************************************************
* Structures and Typedefs *
@@ -259,7 +272,8 @@ typedef struct PDMDRVINSINT
*/
typedef struct PDMCRITSECTINT
{
- /** The critical section core which is shared with IPRT. */
+ /** The critical section core which is shared with IPRT.
+ * @note The semaphore is a SUPSEMEVENT. */
RTCRITSECT Core;
/** Pointer to the next critical section.
* This chain is used for relocating pVMRC and device cleanup. */
@@ -306,6 +320,64 @@ typedef PDMCRITSECTINT *PPDMCRITSECTINT;
/**
+ * Private critical section data.
+ */
+typedef struct PDMCRITSECTRWINT
+{
+ /** The read/write critical section core which is shared with IPRT.
+ * @note The semaphores are SUPSEMEVENT and SUPSEMEVENTMULTI. */
+ RTCRITSECTRW Core;
+
+ /** Pointer to the next critical section.
+ * This chain is used for relocating pVMRC and device cleanup. */
+ R3PTRTYPE(struct PDMCRITSECTRWINT *) pNext;
+ /** Owner identifier.
+ * This is pDevIns if the owner is a device. Similarly for a driver or service.
+ * PDMR3CritSectInit() sets this to point to the critsect itself. */
+ RTR3PTR pvKey;
+ /** Pointer to the VM - R3Ptr. */
+ PVMR3 pVMR3;
+ /** Pointer to the VM - R0Ptr. */
+ PVMR0 pVMR0;
+ /** Pointer to the VM - GCPtr. */
+ PVMRC pVMRC;
+#if HC_ARCH_BITS == 64
+ /** Alignment padding. */
+ RTRCPTR RCPtrPadding;
+#endif
+ /** The lock name. */
+ R3PTRTYPE(const char *) pszName;
+ /** R0/RC write lock contention. */
+ STAMCOUNTER StatContentionRZEnterExcl;
+ /** R0/RC write unlock contention. */
+ STAMCOUNTER StatContentionRZLeaveExcl;
+ /** R0/RC read lock contention. */
+ STAMCOUNTER StatContentionRZEnterShared;
+ /** R0/RC read unlock contention. */
+ STAMCOUNTER StatContentionRZLeaveShared;
+ /** R0/RC writes. */
+ STAMCOUNTER StatRZEnterExcl;
+ /** R0/RC reads. */
+ STAMCOUNTER StatRZEnterShared;
+ /** R3 write lock contention. */
+ STAMCOUNTER StatContentionR3EnterExcl;
+ /** R3 read lock contention. */
+ STAMCOUNTER StatContentionR3EnterShared;
+ /** R3 writes. */
+ STAMCOUNTER StatR3EnterExcl;
+ /** R3 reads. */
+ STAMCOUNTER StatR3EnterShared;
+ /** Profiling the time the section is write locked. */
+ STAMPROFILEADV StatWriteLocked;
+} PDMCRITSECTRWINT;
+AssertCompileMemberAlignment(PDMCRITSECTRWINT, StatContentionRZEnterExcl, 8);
+AssertCompileMemberAlignment(PDMCRITSECTRWINT, Core.u64State, 8);
+/** Pointer to private critical section data. */
+typedef PDMCRITSECTRWINT *PPDMCRITSECTRWINT;
+
+
+
+/**
* The usual device/driver/internal/external stuff.
*/
typedef enum
@@ -351,6 +423,7 @@ typedef struct PDMTHREADINT
#define PDMUSBINSINT_DECLARED
#define PDMDRVINSINT_DECLARED
#define PDMCRITSECTINT_DECLARED
+#define PDMCRITSECTRWINT_DECLARED
#define PDMTHREADINT_DECLARED
#ifdef ___VBox_pdm_h
# error "Invalid header PDM order. Include PDMInternal.h before VBox/vmm/pdm.h!"
@@ -487,13 +560,13 @@ typedef struct PDMAPIC
/** Pointer to the APIC device instance - R3 Ptr. */
PPDMDEVINSR3 pDevInsR3;
/** @copydoc PDMAPICREG::pfnGetInterruptR3 */
- DECLR3CALLBACKMEMBER(int, pfnGetInterruptR3,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+ DECLR3CALLBACKMEMBER(int, pfnGetInterruptR3,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint32_t *puTagSrc));
/** @copydoc PDMAPICREG::pfnHasPendingIrqR3 */
- DECLR3CALLBACKMEMBER(bool, pfnHasPendingIrqR3,(PPDMDEVINS pDevIns));
+ DECLR3CALLBACKMEMBER(bool, pfnHasPendingIrqR3,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t *pu8PendingIrq));
/** @copydoc PDMAPICREG::pfnSetBaseR3 */
- DECLR3CALLBACKMEMBER(void, pfnSetBaseR3,(PPDMDEVINS pDevIns, uint64_t u64Base));
+ DECLR3CALLBACKMEMBER(void, pfnSetBaseR3,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint64_t u64Base));
/** @copydoc PDMAPICREG::pfnGetBaseR3 */
- DECLR3CALLBACKMEMBER(uint64_t, pfnGetBaseR3,(PPDMDEVINS pDevIns));
+ DECLR3CALLBACKMEMBER(uint64_t, pfnGetBaseR3,(PPDMDEVINS pDevIns, VMCPUID idCpu));
/** @copydoc PDMAPICREG::pfnSetTPRR3 */
DECLR3CALLBACKMEMBER(void, pfnSetTPRR3,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t u8TPR));
/** @copydoc PDMAPICREG::pfnGetTPRR3 */
@@ -511,13 +584,13 @@ typedef struct PDMAPIC
/** Pointer to the APIC device instance - R0 Ptr. */
PPDMDEVINSR0 pDevInsR0;
/** @copydoc PDMAPICREG::pfnGetInterruptR3 */
- DECLR0CALLBACKMEMBER(int, pfnGetInterruptR0,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+ DECLR0CALLBACKMEMBER(int, pfnGetInterruptR0,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint32_t *puTagSrc));
/** @copydoc PDMAPICREG::pfnHasPendingIrqR3 */
- DECLR0CALLBACKMEMBER(bool, pfnHasPendingIrqR0,(PPDMDEVINS pDevIns));
+ DECLR0CALLBACKMEMBER(bool, pfnHasPendingIrqR0,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t *pu8PendingIrq));
/** @copydoc PDMAPICREG::pfnSetBaseR3 */
- DECLR0CALLBACKMEMBER(void, pfnSetBaseR0,(PPDMDEVINS pDevIns, uint64_t u64Base));
+ DECLR0CALLBACKMEMBER(void, pfnSetBaseR0,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint64_t u64Base));
/** @copydoc PDMAPICREG::pfnGetBaseR3 */
- DECLR0CALLBACKMEMBER(uint64_t, pfnGetBaseR0,(PPDMDEVINS pDevIns));
+ DECLR0CALLBACKMEMBER(uint64_t, pfnGetBaseR0,(PPDMDEVINS pDevIns, VMCPUID idCpu));
/** @copydoc PDMAPICREG::pfnSetTPRR3 */
DECLR0CALLBACKMEMBER(void, pfnSetTPRR0,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t u8TPR));
/** @copydoc PDMAPICREG::pfnGetTPRR3 */
@@ -535,13 +608,13 @@ typedef struct PDMAPIC
/** Pointer to the APIC device instance - RC Ptr. */
PPDMDEVINSRC pDevInsRC;
/** @copydoc PDMAPICREG::pfnGetInterruptR3 */
- DECLRCCALLBACKMEMBER(int, pfnGetInterruptRC,(PPDMDEVINS pDevIns, uint32_t *puTagSrc));
+ DECLRCCALLBACKMEMBER(int, pfnGetInterruptRC,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint32_t *puTagSrc));
/** @copydoc PDMAPICREG::pfnHasPendingIrqR3 */
- DECLRCCALLBACKMEMBER(bool, pfnHasPendingIrqRC,(PPDMDEVINS pDevIns));
+ DECLRCCALLBACKMEMBER(bool, pfnHasPendingIrqRC,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t *pu8PendingIrq));
/** @copydoc PDMAPICREG::pfnSetBaseR3 */
- DECLRCCALLBACKMEMBER(void, pfnSetBaseRC,(PPDMDEVINS pDevIns, uint64_t u64Base));
+ DECLRCCALLBACKMEMBER(void, pfnSetBaseRC,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint64_t u64Base));
/** @copydoc PDMAPICREG::pfnGetBaseR3 */
- DECLRCCALLBACKMEMBER(uint64_t, pfnGetBaseRC,(PPDMDEVINS pDevIns));
+ DECLRCCALLBACKMEMBER(uint64_t, pfnGetBaseRC,(PPDMDEVINS pDevIns, VMCPUID idCpu));
/** @copydoc PDMAPICREG::pfnSetTPRR3 */
DECLRCCALLBACKMEMBER(void, pfnSetTPRRC,(PPDMDEVINS pDevIns, VMCPUID idCpu, uint8_t u8TPR));
/** @copydoc PDMAPICREG::pfnGetTPRR3 */
@@ -615,10 +688,6 @@ typedef struct PDMPCIBUS
/** @copydoc PDMPCIBUSREG::pfnSetConfigCallbacksR3 */
DECLR3CALLBACKMEMBER(void, pfnSetConfigCallbacksR3,(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev, PFNPCICONFIGREAD pfnRead,
PPFNPCICONFIGREAD ppfnReadOld, PFNPCICONFIGWRITE pfnWrite, PPFNPCICONFIGWRITE ppfnWriteOld));
- /** @copydoc PDMPCIBUSREG::pfnSaveExecR3 */
- DECLR3CALLBACKMEMBER(int, pfnSaveExecR3,(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev, PSSMHANDLE pSSMHandle));
- /** @copydoc PDMPCIBUSREG::pfnLoadExecR3 */
- DECLR3CALLBACKMEMBER(int, pfnLoadExecR3,(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev, PSSMHANDLE pSSMHandle));
/** @copydoc PDMPCIBUSREG::pfnFakePCIBIOSR3 */
DECLR3CALLBACKMEMBER(int, pfnFakePCIBIOSR3,(PPDMDEVINS pDevIns));
@@ -945,16 +1014,36 @@ typedef struct PDMBLKCACHEGLOBAL *PPDMBLKCACHEGLOBAL;
/**
* PDM VMCPU Instance data.
- * Changes to this must checked against the padding of the cfgm union in VMCPU!
+ * Changes to this must be checked against the padding of the pdm union in VMCPU!
*/
typedef struct PDMCPU
{
- /** The number of entries in the apQueuedCritSectsLeaves table that's currently in use. */
+    /** The number of entries in the apQueuedCritSectLeaves table that's currently
+ * in use. */
uint32_t cQueuedCritSectLeaves;
uint32_t uPadding0; /**< Alignment padding.*/
- /** Critical sections queued in RC/R0 because of contention preventing leave to complete. (R3 Ptrs)
+ /** Critical sections queued in RC/R0 because of contention preventing leave to
+ * complete. (R3 Ptrs)
+ * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
+ R3PTRTYPE(PPDMCRITSECT) apQueuedCritSectLeaves[8];
+
+ /** The number of entries in the apQueuedCritSectRwExclLeaves table that's
+ * currently in use. */
+ uint32_t cQueuedCritSectRwExclLeaves;
+ uint32_t uPadding1; /**< Alignment padding.*/
+ /** Read/write critical sections queued in RC/R0 because of contention
+ * preventing exclusive leave to complete. (R3 Ptrs)
+ * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
+ R3PTRTYPE(PPDMCRITSECTRW) apQueuedCritSectRwExclLeaves[8];
+
+    /** The number of entries in the apQueuedCritSectRwShrdLeaves table that's
+ * currently in use. */
+ uint32_t cQueuedCritSectRwShrdLeaves;
+ uint32_t uPadding2; /**< Alignment padding.*/
+ /** Read/write critical sections queued in RC/R0 because of contention
+ * preventing shared leave to complete. (R3 Ptrs)
* We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
- R3PTRTYPE(PPDMCRITSECT) apQueuedCritSectsLeaves[8];
+ R3PTRTYPE(PPDMCRITSECTRW) apQueuedCritSectRwShrdLeaves[8];
} PDMCPU;
@@ -1073,6 +1162,8 @@ typedef struct PDMUSERPERVM
PPDMMOD pModules;
/** List of initialized critical sections. (LIFO) */
R3PTRTYPE(PPDMCRITSECTINT) pCritSects;
+ /** List of initialized read/write critical sections. (LIFO) */
+ R3PTRTYPE(PPDMCRITSECTRWINT) pRwCritSects;
/** Head of the PDM Thread list. (singly linked) */
R3PTRTYPE(PPDMTHREAD) pThreads;
/** Tail of the PDM Thread list. (singly linked) */
@@ -1085,13 +1176,14 @@ typedef struct PDMUSERPERVM
/** Head of the templates. Singly linked, protected by ListCritSect. */
R3PTRTYPE(PPDMASYNCCOMPLETIONTEMPLATE) pAsyncCompletionTemplates;
/** @} */
+
+ /** Global block cache data. */
+ R3PTRTYPE(PPDMBLKCACHEGLOBAL) pBlkCacheGlobal;
#ifdef VBOX_WITH_NETSHAPER
/** Pointer to network shaper instance. */
R3PTRTYPE(PPDMNETSHAPER) pNetShaper;
#endif /* VBOX_WITH_NETSHAPER */
- R3PTRTYPE(PPDMBLKCACHEGLOBAL) pBlkCacheGlobal;
-
} PDMUSERPERVM;
/** Pointer to the PDM data kept in the UVM. */
typedef PDMUSERPERVM *PPDMUSERPERVM;
@@ -1154,14 +1246,22 @@ extern const PDMPCIRAWHLPR3 g_pdmR3DevPciRawHlp;
#ifdef IN_RING3
bool pdmR3IsValidName(const char *pszName);
-int pdmR3CritSectInitStats(PVM pVM);
-void pdmR3CritSectRelocate(PVM pVM);
-int pdmR3CritSectInitDevice(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, va_list va);
-int pdmR3CritSectInitDeviceAuto(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
- const char *pszNameFmt, ...);
-int pdmR3CritSectDeleteDevice(PVM pVM, PPDMDEVINS pDevIns);
-int pdmR3CritSectInitDriver(PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...);
-int pdmR3CritSectDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns);
+int pdmR3CritSectBothInitStats(PVM pVM);
+void pdmR3CritSectBothRelocate(PVM pVM);
+int pdmR3CritSectBothDeleteDevice(PVM pVM, PPDMDEVINS pDevIns);
+int pdmR3CritSectBothDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns);
+int pdmR3CritSectInitDevice( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va);
+int pdmR3CritSectInitDeviceAuto( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectInitDriver( PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectRwInitDevice( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va);
+int pdmR3CritSectRwInitDeviceAuto( PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
+int pdmR3CritSectRwInitDriver( PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...);
int pdmR3DevInit(PVM pVM);
PPDMDEV pdmR3DevLookup(PVM pVM, const char *pszName);
@@ -1206,6 +1306,13 @@ int pdmR3ThreadSuspendAll(PVM pVM);
int pdmR3AsyncCompletionInit(PVM pVM);
int pdmR3AsyncCompletionTerm(PVM pVM);
void pdmR3AsyncCompletionResume(PVM pVM);
+int pdmR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEDEV pfnCompleted, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEUSB pfnCompleted, const char *pszDesc);
+int pdmR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDevIns);
+int pdmR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns);
+int pdmR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns);
#endif
#ifdef VBOX_WITH_NETSHAPER
@@ -1223,6 +1330,11 @@ void pdmLock(PVM pVM);
int pdmLockEx(PVM pVM, int rc);
void pdmUnlock(PVM pVM);
+#if defined(IN_RING3) || defined(IN_RING0)
+void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis);
+void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis);
+#endif
+
/** @} */
RT_C_DECLS_END
diff --git a/src/VBox/VMM/include/PDMNetShaperInternal.h b/src/VBox/VMM/include/PDMNetShaperInternal.h
new file mode 100644
index 00000000..33d8e7e8
--- /dev/null
+++ b/src/VBox/VMM/include/PDMNetShaperInternal.h
@@ -0,0 +1,46 @@
+/* $Id: PDMNetShaperInternal.h $ */
+/** @file
+ * PDM Network Shaper - Internal data structures and functions common for both R0 and R3 parts.
+ */
+
+/*
+ * Copyright (C) 2011-2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/**
+ * Bandwidth group instance data
+ */
+typedef struct PDMNSBWGROUP
+{
+ /** Pointer to the next group in the list. */
+ R3PTRTYPE(struct PDMNSBWGROUP *) pNextR3;
+ /** Pointer to the shared UVM structure. */
+ R3PTRTYPE(struct PDMNETSHAPER *) pShaperR3;
+ /** Critical section protecting all members below. */
+ PDMCRITSECT Lock;
+ /** Pointer to the first filter attached to this group. */
+ R3PTRTYPE(struct PDMNSFILTER *) pFiltersHeadR3;
+ /** Bandwidth group name. */
+ R3PTRTYPE(char *) pszNameR3;
+ /** Maximum number of bytes filters are allowed to transfer. */
+ volatile uint64_t cbPerSecMax;
+ /** Number of bytes we are allowed to transfer in one burst. */
+ volatile uint32_t cbBucket;
+ /** Number of bytes we were allowed to transfer at the last update. */
+ volatile uint32_t cbTokensLast;
+ /** Timestamp of the last update */
+ volatile uint64_t tsUpdatedLast;
+ /** Reference counter - How many filters are associated with this group. */
+ volatile uint32_t cRefs;
+} PDMNSBWGROUP;
+/** Pointer to a bandwidth group. */
+typedef PDMNSBWGROUP *PPDMNSBWGROUP;
+
diff --git a/src/VBox/VMM/include/PGMGstDefs.h b/src/VBox/VMM/include/PGMGstDefs.h
index 5fe79db5..c531a4e3 100644
--- a/src/VBox/VMM/include/PGMGstDefs.h
+++ b/src/VBox/VMM/include/PGMGstDefs.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/PGMInline.h b/src/VBox/VMM/include/PGMInline.h
index a3efcf52..995cad9b 100644
--- a/src/VBox/VMM/include/PGMInline.h
+++ b/src/VBox/VMM/include/PGMInline.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -31,7 +31,7 @@
#include <VBox/vmm/dbgf.h>
#include <VBox/log.h>
#include <VBox/vmm/gmm.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
@@ -1484,10 +1484,11 @@ DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
/* There are no mappings in VT-x and AMD-V mode. */
- Assert(pVM->pgm.s.fMappingsDisabled);
+ Assert(HMIsEnabled(pVM));
return false;
#else
- return !pVM->pgm.s.fMappingsDisabled;
+ Assert(pVM->cCpus == 1 || HMIsEnabled(pVM));
+ return !HMIsEnabled(pVM);
#endif
}
@@ -1502,11 +1503,11 @@ DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
/* There are no mappings in VT-x and AMD-V mode. */
- Assert(pVM->pgm.s.fMappingsDisabled);
+ Assert(HMIsEnabled(pVM));
return false;
#else
- return !pVM->pgm.s.fMappingsDisabled
- && !pVM->pgm.s.fMappingsFixed;
+ return !pVM->pgm.s.fMappingsFixed
+ && pgmMapAreMappingsEnabled(pVM);
#endif
}
diff --git a/src/VBox/VMM/include/PGMInternal.h b/src/VBox/VMM/include/PGMInternal.h
index 38d605c4..fff44af9 100644
--- a/src/VBox/VMM/include/PGMInternal.h
+++ b/src/VBox/VMM/include/PGMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -32,8 +32,8 @@
#include <VBox/vmm/dbgf.h>
#include <VBox/log.h>
#include <VBox/vmm/gmm.h>
-#include <VBox/vmm/hwaccm.h>
-#include <VBox/vmm/hwacc_vmx.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/hm_vmx.h>
#include "internal/pgm.h"
#include <iprt/asm.h>
#include <iprt/assert.h>
@@ -55,10 +55,28 @@
*/
/**
- * Indicates that there are no guest mappings to care about.
- * Currently on raw-mode related code uses mappings, i.e. RC and R3 code.
- */
-#if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
+ * Indicates that there are no guest mappings in the shadow tables.
+ *
+ * Note! In ring-3 the macro is also used to exclude the management of the
+ * intermediate context page tables. On 32-bit systems we use the intermediate
+ * context to support 64-bit guest execution. Thus, we cannot fully make it
+ * without mappings there even when VBOX_WITH_RAW_MODE is not defined.
+ *
+ * In raw-mode context there are by design always guest mappings (the code is
+ * executed from one), while in ring-0 there are none at all. Neither context
+ * manages the page tables for intermediate switcher context, that's all done in
+ * ring-3.
+ *
+ * On 32-bit darwin (hybrid kernel) we do 64-bit guest support differently, so
+ * there we can safely work without mappings if we don't compile in raw-mode.
+ */
+#if defined(IN_RING0) \
+ || ( !defined(VBOX_WITH_RAW_MODE) \
+ && ( HC_ARCH_BITS != 32 \
+ || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) \
+ || !defined(VBOX_WITH_64_BITS_GUESTS) \
+ ) \
+ )
# define PGM_WITHOUT_MAPPINGS
#endif
@@ -347,9 +365,9 @@
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
-# define PGM_INVL_PG(pVCpu, GCVirt) HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
+# define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#else
-# define PGM_INVL_PG(pVCpu, GCVirt) HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
+# define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif
/** @def PGM_INVL_PG_ALL_VCPU
@@ -361,9 +379,9 @@
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
-# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
+# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#else
-# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
+# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif
/** @def PGM_INVL_BIG_PG
@@ -375,9 +393,9 @@
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt) ASMReloadCR3()
#elif defined(IN_RING0)
-# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HWACCMFlushTLB(pVCpu)
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTLB(pVCpu)
#else
-# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HWACCMFlushTLB(pVCpu)
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTLB(pVCpu)
#endif
/** @def PGM_INVL_VCPU_TLBS()
@@ -388,9 +406,9 @@
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu) ASMReloadCR3()
#elif defined(IN_RING0)
-# define PGM_INVL_VCPU_TLBS(pVCpu) HWACCMFlushTLB(pVCpu)
+# define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTLB(pVCpu)
#else
-# define PGM_INVL_VCPU_TLBS(pVCpu) HWACCMFlushTLB(pVCpu)
+# define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTLB(pVCpu)
#endif
/** @def PGM_INVL_ALL_VCPU_TLBS()
@@ -401,9 +419,9 @@
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM) ASMReloadCR3()
#elif defined(IN_RING0)
-# define PGM_INVL_ALL_VCPU_TLBS(pVM) HWACCMFlushTLBOnAllVCpus(pVM)
+# define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTLBOnAllVCpus(pVM)
#else
-# define PGM_INVL_ALL_VCPU_TLBS(pVM) HWACCMFlushTLBOnAllVCpus(pVM)
+# define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTLBOnAllVCpus(pVM)
#endif
@@ -712,7 +730,11 @@ typedef union PGMPAGE
/** 63:54 - PTE index for usage tracking (page pool). */
uint64_t u10PteIdx : 10;
- /** The GMM page ID. */
+ /** The GMM page ID.
+ * @remarks In the current implementation, MMIO2 and pages aliased to
+ * MMIO2 pages will be exploiting this field to calculate the
+ * ring-3 mapping address corresponding to the page.
+ * Later we may consider including MMIO2 management into GMM. */
uint32_t idPage;
/** Usage tracking (page pool). */
uint16_t u16TrackingY;
@@ -943,13 +965,38 @@ typedef PPGMPAGE *PPPGMPAGE;
do { (a_pPage)->s.u10PteIdx = (a_iPte); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
/**
- * Checks if the page is marked for MMIO.
+ * Checks if the page is marked for MMIO, no MMIO2 aliasing.
* @returns true/false.
* @param a_pPage Pointer to the physical guest page tracking structure.
*/
#define PGM_PAGE_IS_MMIO(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO )
/**
+ * Checks if the page is marked for MMIO, including both aliases.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_MMIO_OR_ALIAS(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO2_ALIAS_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO \
+ )
+
+/**
+ * Checks if the page is marked for MMIO, including special aliases.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_MMIO \
+ || (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO )
+
+/**
+ * Checks if the page is a special aliased MMIO page.
+ * @returns true/false.
+ * @param a_pPage Pointer to the physical guest page tracking structure.
+ */
+#define PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(a_pPage) ( (a_pPage)->s.uTypeY == PGMPAGETYPE_SPECIAL_ALIAS_MMIO )
+
+/**
* Checks if the page is backed by the ZERO page.
* @returns true/false.
* @param a_pPage Pointer to the physical guest page tracking structure.
@@ -983,7 +1030,7 @@ typedef PPGMPAGE *PPPGMPAGE;
* @param a_pPage Pointer to the physical guest page tracking structure.
*/
#define PGM_PAGE_SET_WRITTEN_TO(a_pVM, a_pPage) \
- do { (a_pPage)->au8[1] |= UINT8_C(0x80); PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0) /// FIXME FIXME
+ do { (a_pPage)->s.fWrittenToY = 1; PGM_PAGE_ASSERT_LOCK(a_pVM); } while (0)
/**
* Clears the written-to indicator.
@@ -1566,8 +1613,10 @@ typedef struct PGMMMIO2RANGE
uint8_t iRegion;
/** The saved state range ID. */
uint8_t idSavedState;
+ /** MMIO2 range identifier, for page IDs (PGMPAGE::s.idPage). */
+ uint8_t idMmio2;
/** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
- uint8_t abAlignemnt[HC_ARCH_BITS == 32 ? 12 : 12];
+ uint8_t abAlignment[HC_ARCH_BITS == 32 ? 11 : 11];
/** Live save per page tracking data. */
R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages;
/** The associated RAM range. */
@@ -1576,6 +1625,19 @@ typedef struct PGMMMIO2RANGE
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
+/** @name Internal MMIO2 constants.
+ * @{ */
+/** The maximum number of MMIO2 ranges. */
+#define PGM_MMIO2_MAX_RANGES 8
+/** The maximum number of pages in a MMIO2 range. */
+#define PGM_MMIO2_MAX_PAGE_COUNT UINT32_C(0x00ffffff)
+/** Makes a MMIO2 page ID out of a MMIO2 range ID and page index number. */
+#define PGM_MMIO2_PAGEID_MAKE(a_idMmio2, a_iPage) ( ((uint32_t)(a_idMmio2) << 24) | (uint32_t)(a_iPage) )
+/** Gets the MMIO2 range ID from an MMIO2 page ID. */
+#define PGM_MMIO2_PAGEID_GET_MMIO2_ID(a_idPage) ( (uint8_t)((a_idPage) >> 24) )
+/** Gets the MMIO2 page index from an MMIO2 page ID. */
+#define PGM_MMIO2_PAGEID_GET_IDX(a_idPage) ( ((a_idPage) & UINT32_C(0x00ffffff)) )
+/** @} */
@@ -1944,18 +2006,9 @@ typedef PGMMAPSET *PPGMMAPSET;
* @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX 0
-/** The first normal index. */
-#define PGMPOOL_IDX_FIRST_SPECIAL 1
-/** Page directory (32-bit root). */
-#define PGMPOOL_IDX_PD 1
-/** Page Directory Pointer Table (PAE root). */
-#define PGMPOOL_IDX_PDPT 2
-/** AMD64 CR3 level index.*/
-#define PGMPOOL_IDX_AMD64_CR3 3
-/** Nested paging root.*/
-#define PGMPOOL_IDX_NESTED_ROOT 4
-/** The first normal index. */
-#define PGMPOOL_IDX_FIRST 5
+/** The first normal index. There used to be 5 fictive pages up front, now
+ * there is only the NIL page. */
+#define PGMPOOL_IDX_FIRST 1
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST 0x3fff
/** @} */
@@ -2689,6 +2742,48 @@ typedef PGMPTWALKGST32BIT *PPGMPTWALKGST32BIT;
/** Pointer to a const 32-bit guest page table walk. */
typedef PGMPTWALKGST32BIT const *PCPGMPTWALKGST32BIT;
+/**
+ * Which part of PGMPTWALKGST that is valid.
+ */
+typedef enum PGMPTWALKGSTTYPE
+{
+ /** Customary invalid 0 value. */
+ PGMPTWALKGSTTYPE_INVALID = 0,
+ /** PGMPTWALKGST::u.Amd64 is valid. */
+ PGMPTWALKGSTTYPE_AMD64,
+ /** PGMPTWALKGST::u.Pae is valid. */
+ PGMPTWALKGSTTYPE_PAE,
+ /** PGMPTWALKGST::u.Legacy is valid. */
+ PGMPTWALKGSTTYPE_32BIT,
+ /** Customary 32-bit type hack. */
+ PGMPTWALKGSTTYPE_32BIT_HACK = 0x7fff0000
+} PGMPTWALKGSTTYPE;
+
+/**
+ * Combined guest page table walk result.
+ */
+typedef struct PGMPTWALKGST
+{
+ union
+ {
+ /** The page walker core - always valid. */
+ PGMPTWALKCORE Core;
+ /** The page walker for AMD64. */
+ PGMPTWALKGSTAMD64 Amd64;
+ /** The page walker for PAE (32-bit). */
+ PGMPTWALKGSTPAE Pae;
+ /** The page walker for 32-bit paging (called legacy due to C naming
+         * convention). */
+ PGMPTWALKGST32BIT Legacy;
+ } u;
+ /** Indicates which part of the union is valid. */
+ PGMPTWALKGSTTYPE enmType;
+} PGMPTWALKGST;
+/** Pointer to a combined guest page table walk result. */
+typedef PGMPTWALKGST *PPGMPTWALKGST;
+/** Pointer to a read-only combined guest page table walk result. */
+typedef PGMPTWALKGST const *PCPGMPTWALKGST;
+
/** @name Paging mode macros
* @{
@@ -3023,7 +3118,7 @@ typedef struct PGM
* This is used */
bool fLessThan52PhysicalAddressBits;
/** Set when nested paging is active.
- * This is meant to save calls to HWACCMIsNestedPagingActive and let the
+ * This is meant to save calls to HMIsNestedPagingActive and let the
* compilers optimize the code better. Whether we use nested paging or
* not is something we find out during VMM initialization and we won't
* change this later on. */
@@ -3035,6 +3130,8 @@ typedef struct PGM
bool fNoMorePhysWrites;
/** Set if PCI passthrough is enabled. */
bool fPciPassthrough;
+ /** The number of MMIO2 regions (serves as the next MMIO2 ID). */
+ uint8_t cMmio2Regions;
/** Alignment padding that makes the next member start on a 8 byte boundary. */
bool afAlignment1[2];
@@ -3046,9 +3143,6 @@ typedef struct PGM
/** If set if restored as fixed but we were unable to re-fixate at the old
* location because of room or address incompatibilities. */
bool fMappingsFixedRestored;
- /** If set, then no mappings are put into the shadow page table.
- * Use pgmMapAreMappingsEnabled() instead of direct access. */
- bool fMappingsDisabled;
/** Size of fixed mapping.
* This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
uint32_t cbMappingFixed;
@@ -3095,6 +3189,8 @@ typedef struct PGM
* The index into this table is made up from */
R3PTRTYPE(PPGMMODEDATA) paModeData;
RTR3PTR R3PtrAlignment0;
+ /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */
+ R3PTRTYPE(PPGMMMIO2RANGE) apMmio2RangesR3[PGM_MMIO2_MAX_RANGES];
/** RAM range TLB for R0. */
R0PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR0[PGM_RAMRANGE_TLB_ENTRIES];
@@ -3114,7 +3210,8 @@ typedef struct PGM
/** R0 pointer corresponding to PGM::pRomRangesR3. */
R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
RTR0PTR R0PtrAlignment0;
-
+ /** MMIO2 lookup array for ring-3. Indexed by idMmio2 minus 1. */
+ R0PTRTYPE(PPGMMMIO2RANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES];
/** RAM range TLB for RC. */
RCPTRTYPE(PPGMRAMRANGE) apRamRangesTlbRC[PGM_RAMRANGE_TLB_ENTRIES];
@@ -3426,6 +3523,8 @@ typedef struct PGMCPUSTATS
STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
STAMPROFILE StatRZTrap0eTime2WPEmulation; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP emulation. */
+ STAMPROFILE StatRZTrap0eTime2Wp0RoUsHack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be enabled. */
+ STAMPROFILE StatRZTrap0eTime2Wp0RoUsUnhack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled. */
STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
@@ -3634,7 +3733,7 @@ typedef struct PGMCPU
/** What needs syncing (PGM_SYNC_*).
* This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
* PGMFlushTLB, and PGMR3Load. */
- RTUINT fSyncFlags;
+ uint32_t fSyncFlags;
/** The shadow paging mode. */
PGMMODE enmShadowMode;
@@ -3749,12 +3848,6 @@ typedef struct PGMCPU
R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
/** Pointer to the page of the current active CR3 - RC Ptr. */
RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
- /** The shadow page pool index of the user table as specified during
- * allocation; useful for freeing root pages. */
- uint32_t iShwUser;
- /** The index into the user table (shadowed) as specified during allocation;
- * useful for freeing root pages. */
- uint32_t iShwUserTable;
# if HC_ARCH_BITS == 64
RTRCPTR alignment6; /**< structure size alignment. */
# endif
@@ -3835,6 +3928,9 @@ typedef struct PGMCPU
* on the stack. */
DISCPUSTATE DisState;
+ /** Counts the number of times the netware WP0+RO+US hack has been applied. */
+ uint64_t cNetwareWp0Hacks;
+
/** Count the number of pgm pool access handler calls. */
uint64_t cPoolAccessHandler;
@@ -3888,7 +3984,12 @@ typedef PGMCPU *PPGMCPU;
RT_C_DECLS_BEGIN
+#if defined(VBOX_STRICT) && defined(IN_RING3)
+int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL);
+# define pgmLock(a_pVM) pgmLockDebug(a_pVM, RT_SRC_POS)
+#else
int pgmLock(PVM pVM);
+#endif
void pgmUnlock(PVM pVM);
/**
* Asserts that the caller owns the PDM lock.
@@ -3904,11 +4005,13 @@ void pgmUnlock(PVM pVM);
*/
#define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu) Assert(PDMCritSectIsOwnerEx(&(a_pVM)->pgm.s.CritSectX, pVCpu))
+#ifndef PGM_WITHOUT_MAPPINGS
int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
-PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
int pgmMapResolveConflicts(PVM pVM);
+#endif /* !PGM_WITHOUT_MAPPINGS */
+PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
@@ -3958,6 +4061,7 @@ void pgmR3PhysRelinkRamRanges(PVM pVM);
int pgmR3PhysRamPreAllocate(PVM pVM);
int pgmR3PhysRamReset(PVM pVM);
int pgmR3PhysRomReset(PVM pVM);
+int pgmR3PhysRamZeroAll(PVM pVM);
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
int pgmR3PhysRamTerm(PVM pVM);
void pgmR3PhysRomTerm(PVM pVM);
@@ -4013,6 +4117,7 @@ void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAP
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
+int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags);
int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode);
@@ -4020,10 +4125,11 @@ int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd);
int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt);
int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd);
int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4);
+int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 && defined(IN_RING3)
-DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+FNDBGCCMD pgmR3CmdCheckDuplicatePages;
+FNDBGCCMD pgmR3CmdShowSharedModules;
# endif
RT_C_DECLS_END
diff --git a/src/VBox/VMM/include/REMInternal.h b/src/VBox/VMM/include/REMInternal.h
index bfe62909..1f41096e 100644
--- a/src/VBox/VMM/include/REMInternal.h
+++ b/src/VBox/VMM/include/REMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -234,7 +234,9 @@ typedef REM *PREM;
#ifdef REM_INCLUDE_CPU_H
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException);
void remR3CSAMCheckEIP(CPUState *env, RTGCPTR GCPtrCode);
+# ifdef VBOX_WITH_RAW_MODE
bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte);
+# endif
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix);
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr);
void remR3FlushTLB(CPUState *env, bool fGlobal);
diff --git a/src/VBox/VMM/include/SELMInline.h b/src/VBox/VMM/include/SELMInline.h
new file mode 100644
index 00000000..a1222945
--- /dev/null
+++ b/src/VBox/VMM/include/SELMInline.h
@@ -0,0 +1,316 @@
+/* $Id: SELMInline.h $ */
+/** @file
+ * SELM - Internal header file.
+ */
+
+/*
+ * Copyright (C) 2006-2012 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef ___SELMInline_h
+#define ___SELMInline_h
+
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+
+/**
+ * Checks if a shadow descriptor table entry is good for the given segment
+ * register.
+ *
+ * @returns @c true if good, @c false if not.
+ * @param pSReg The segment register.
+ * @param pShwDesc The shadow descriptor table entry.
+ * @param iSReg The segment register index (X86_SREG_XXX).
+ * @param uCpl The CPL.
+ */
+DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl)
+{
+ /*
+ * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
+ */
+
+ if (!pShwDesc->Gen.u1Present)
+ {
+ Log(("selmIsShwDescGoodForSReg: Not present\n"));
+ return false;
+ }
+
+ if (!pShwDesc->Gen.u1DescType)
+ {
+ Log(("selmIsShwDescGoodForSReg: System descriptor\n"));
+ return false;
+ }
+
+ if (iSReg == X86_SREG_SS)
+ {
+ if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+ {
+ Log(("selmIsShwDescGoodForSReg: Stack must be writable\n"));
+ return false;
+ }
+ if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
+ {
+ Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available));
+ return false;
+ }
+ }
+ else
+ {
+ if (iSReg == X86_SREG_CS)
+ {
+ if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
+ {
+ Log(("selmIsShwDescGoodForSReg: CS needs code segment\n"));
+ return false;
+ }
+ }
+ else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+ {
+ Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg));
+ return false;
+ }
+
+ if ( (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+ != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
+ && ( ( (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available
+ && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available )
+ || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) )
+ {
+ Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg,
+ pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Checks if a guest descriptor table entry is good for the given segment
+ * register.
+ *
+ * @returns @c true if good, @c false if not.
+ * @param pVCpu The current virtual CPU.
+ * @param pSReg The segment register.
+ * @param pGstDesc The guest descriptor table entry.
+ * @param iSReg The segment register index (X86_SREG_XXX).
+ * @param uCpl The CPL.
+ */
+DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl)
+{
+ /*
+ * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
+ */
+
+ if (!pGstDesc->Gen.u1Present)
+ {
+ Log(("selmIsGstDescGoodForSReg: Not present\n"));
+ return false;
+ }
+
+ if (!pGstDesc->Gen.u1DescType)
+ {
+ Log(("selmIsGstDescGoodForSReg: System descriptor\n"));
+ return false;
+ }
+
+ if (iSReg == X86_SREG_SS)
+ {
+ if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+ {
+ Log(("selmIsGstDescGoodForSReg: Stack must be writable\n"));
+ return false;
+ }
+ if (uCpl > pGstDesc->Gen.u2Dpl)
+ {
+ Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl));
+ return false;
+ }
+ }
+ else
+ {
+ if (iSReg == X86_SREG_CS)
+ {
+ if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
+ {
+ Log(("selmIsGstDescGoodForSReg: CS needs code segment\n"));
+ return false;
+ }
+ }
+ else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+ {
+ Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg));
+ return false;
+ }
+
+ if ( (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+ != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
+ && ( ( (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl
+ && ( (pSReg->Sel & X86_SEL_RPL) != 1
+ || !CPUMIsGuestInRawMode(pVCpu) ) )
+ || uCpl > (unsigned)pGstDesc->Gen.u2Dpl
+ )
+ )
+ {
+ Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg,
+ pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu)));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Converts a guest GDT or LDT entry to a shadow table entry.
+ *
+ * @param pVM The VM handle.
+ * @param pDesc Guest entry on input, shadow entry on return.
+ */
+DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PVM pVM, PX86DESC pDesc)
+{
+ /*
+ * Code and data selectors are generally 1:1, with the
+ * 'little' adjustment we do for DPL 0 selectors.
+ */
+ if (pDesc->Gen.u1DescType)
+ {
+ /*
+ * Hack for A-bit against Trap E on read-only GDT.
+ */
+ /** @todo Fix this by loading ds and cs before turning off WP. */
+ pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+
+ /*
+ * All DPL 0 code and data segments are squeezed into DPL 1.
+ *
+ * We're skipping conforming segments here because those
+ * cannot give us any trouble.
+ */
+ if ( pDesc->Gen.u2Dpl == 0
+ && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+ != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
+ {
+ pDesc->Gen.u2Dpl = 1;
+ pDesc->Gen.u1Available = 1;
+ }
+# ifdef VBOX_WITH_RAW_RING1
+ else if ( pDesc->Gen.u2Dpl == 1
+ && EMIsRawRing1Enabled(pVM)
+ && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+ != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
+ {
+ pDesc->Gen.u2Dpl = 2;
+ pDesc->Gen.u1Available = 1;
+ }
+# endif
+ else
+ pDesc->Gen.u1Available = 0;
+ }
+ else
+ {
+ /*
+ * System type selectors are marked not present.
+ * Recompiler or special handling is required for these.
+ */
+ /** @todo what about interrupt gates and rawr0? */
+ pDesc->Gen.u1Present = 0;
+ }
+}
+
+
+/**
+ * Checks if a segment register is stale given the shadow descriptor table
+ * entry.
+ *
+ * @returns @c true if stale, @c false if not.
+ * @param pSReg The segment register.
+ * @param pShwDesc The shadow descriptor entry.
+ * @param iSReg The segment register number (X86_SREG_XXX).
+ */
+DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg)
+{
+ if ( pSReg->Attr.n.u1Present != pShwDesc->Gen.u1Present
+ || pSReg->Attr.n.u4Type != pShwDesc->Gen.u4Type
+ || pSReg->Attr.n.u1DescType != pShwDesc->Gen.u1DescType
+ || pSReg->Attr.n.u1DefBig != pShwDesc->Gen.u1DefBig
+ || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity
+ || pSReg->Attr.n.u2Dpl != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
+ {
+ Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc)));
+ return true;
+ }
+
+ if (pSReg->u64Base != X86DESC_BASE(pShwDesc))
+ {
+ Log(("selmIsSRegStale32: base changed (%#llx -> %#x)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc)));
+ return true;
+ }
+
+ if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc))
+ {
+ Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc)));
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ * Loads the hidden bits of a selector register from a shadow descriptor table
+ * entry.
+ *
+ * @param pSReg The segment register in question.
+ * @param pShwDesc The shadow descriptor table entry.
+ */
+DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc)
+{
+ pSReg->Attr.u = X86DESC_GET_HID_ATTR(pShwDesc);
+ pSReg->Attr.n.u2Dpl -= pSReg->Attr.n.u1Available;
+ Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
+ pSReg->u32Limit = X86DESC_LIMIT_G(pShwDesc);
+ pSReg->u64Base = X86DESC_BASE(pShwDesc);
+ pSReg->ValidSel = pSReg->Sel;
+/** @todo VBOX_WITH_RAW_RING1 */
+ if (pSReg->Attr.n.u1Available)
+ pSReg->ValidSel &= ~(RTSEL)1;
+ pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
+}
+
+
+/**
+ * Loads the hidden bits of a selector register from a guest descriptor table
+ * entry.
+ *
+ * @param pVCpu The current virtual CPU.
+ * @param pSReg The segment register in question.
+ * @param pGstDesc The guest descriptor table entry.
+ */
+DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc)
+{
+ pSReg->Attr.u = X86DESC_GET_HID_ATTR(pGstDesc);
+ pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED;
+ pSReg->u32Limit = X86DESC_LIMIT_G(pGstDesc);
+ pSReg->u64Base = X86DESC_BASE(pGstDesc);
+ pSReg->ValidSel = pSReg->Sel;
+/** @todo VBOX_WITH_RAW_RING1 */
+ if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu))
+ pSReg->ValidSel &= ~(RTSEL)1;
+ pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
+}
+
+#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
+
+/** @} */
+
+#endif
diff --git a/src/VBox/VMM/include/SELMInternal.h b/src/VBox/VMM/include/SELMInternal.h
index ffaebbcd..f5c0e023 100644
--- a/src/VBox/VMM/include/SELMInternal.h
+++ b/src/VBox/VMM/include/SELMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -33,6 +33,27 @@
* @{
*/
+/** Enable or disable tracking of Shadow GDT/LDT/TSS.
+ * @{
+ */
+#if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
+# define SELM_TRACK_SHADOW_GDT_CHANGES
+# define SELM_TRACK_SHADOW_LDT_CHANGES
+# define SELM_TRACK_SHADOW_TSS_CHANGES
+#endif
+/** @} */
+
+/** Enable or disable tracking of Guest GDT/LDT/TSS.
+ * @{
+ */
+#if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
+# define SELM_TRACK_GUEST_GDT_CHANGES
+# define SELM_TRACK_GUEST_LDT_CHANGES
+# define SELM_TRACK_GUEST_TSS_CHANGES
+#endif
+/** @} */
+
+
/** The number of GDTS allocated for our GDT. (full size) */
#define SELM_GDT_ELEMENTS 8192
@@ -144,12 +165,9 @@ typedef struct SELM
/** Indicates that the Guest GDT access handler have been registered. */
bool fGDTRangeRegistered;
- /** Indicates whether LDT/GDT/TSS monitoring and syncing is disabled. */
- bool fDisableMonitoring;
-
/** Indicates whether the TSS stack selector & base address need to be refreshed. */
bool fSyncTSSRing0Stack;
- bool fPadding2[1+2];
+ bool fPadding2[4];
/** SELMR3UpdateFromCPUM() profiling. */
STAMPROFILE StatUpdateFromCPUM;
@@ -202,290 +220,12 @@ VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCT
VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+#ifdef VBOX_WITH_RAW_RING1
+void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+#endif
RT_C_DECLS_END
-
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-
-/**
- * Checks if a shadow descriptor table entry is good for the given segment
- * register.
- *
- * @returns @c true if good, @c false if not.
- * @param pSReg The segment register.
- * @param pShwDesc The shadow descriptor table entry.
- * @param iSReg The segment register index (X86_SREG_XXX).
- * @param uCpl The CPL.
- */
-DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl)
-{
- /*
- * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
- */
-
- if (!pShwDesc->Gen.u1Present)
- {
- Log(("selmIsShwDescGoodForSReg: Not present\n"));
- return false;
- }
-
- if (!pShwDesc->Gen.u1DescType)
- {
- Log(("selmIsShwDescGoodForSReg: System descriptor\n"));
- return false;
- }
-
- if (iSReg == X86_SREG_SS)
- {
- if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
- {
- Log(("selmIsShwDescGoodForSReg: Stack must be writable\n"));
- return false;
- }
- if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
- {
- Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available));
- return false;
- }
- }
- else
- {
- if (iSReg == X86_SREG_CS)
- {
- if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
- {
- Log(("selmIsShwDescGoodForSReg: CS needs code segment\n"));
- return false;
- }
- }
- else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
- {
- Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg));
- return false;
- }
-
- if ( (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
- != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
- && ( ( (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available
- && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available )
- || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) )
- {
- Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg,
- pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL));
- return false;
- }
- }
-
- return true;
-}
-
-
-/**
- * Checks if a guest descriptor table entry is good for the given segment
- * register.
- *
- * @returns @c true if good, @c false if not.
- * @param pVCpu The current virtual CPU.
- * @param pSReg The segment register.
- * @param pGstDesc The guest descriptor table entry.
- * @param iSReg The segment register index (X86_SREG_XXX).
- * @param uCpl The CPL.
- */
-DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl)
-{
- /*
- * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
- */
-
- if (!pGstDesc->Gen.u1Present)
- {
- Log(("selmIsGstDescGoodForSReg: Not present\n"));
- return false;
- }
-
- if (!pGstDesc->Gen.u1DescType)
- {
- Log(("selmIsGstDescGoodForSReg: System descriptor\n"));
- return false;
- }
-
- if (iSReg == X86_SREG_SS)
- {
- if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
- {
- Log(("selmIsGstDescGoodForSReg: Stack must be writable\n"));
- return false;
- }
- if (uCpl > pGstDesc->Gen.u2Dpl)
- {
- Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl));
- return false;
- }
- }
- else
- {
- if (iSReg == X86_SREG_CS)
- {
- if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
- {
- Log(("selmIsGstDescGoodForSReg: CS needs code segment\n"));
- return false;
- }
- }
- else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
- {
- Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg));
- return false;
- }
-
- if ( (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
- != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
- && ( ( (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl
- && ( (pSReg->Sel & X86_SEL_RPL) != 1
- || !CPUMIsGuestInRawMode(pVCpu) ) )
- || uCpl > (unsigned)pGstDesc->Gen.u2Dpl
- )
- )
- {
- Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg,
- pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu)));
- return false;
- }
- }
-
- return true;
-}
-
-
-/**
- * Converts a guest GDT or LDT entry to a shadow table entry.
- *
- * @param pDesc Guest entry on input, shadow entry on return.
- */
-DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PX86DESC pDesc)
-{
- /*
- * Code and data selectors are generally 1:1, with the
- * 'little' adjustment we do for DPL 0 selectors.
- */
- if (pDesc->Gen.u1DescType)
- {
- /*
- * Hack for A-bit against Trap E on read-only GDT.
- */
- /** @todo Fix this by loading ds and cs before turning off WP. */
- pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-
- /*
- * All DPL 0 code and data segments are squeezed into DPL 1.
- *
- * We're skipping conforming segments here because those
- * cannot give us any trouble.
- */
- if ( pDesc->Gen.u2Dpl == 0
- && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
- != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
- {
- pDesc->Gen.u2Dpl = 1;
- pDesc->Gen.u1Available = 1;
- }
- else
- pDesc->Gen.u1Available = 0;
- }
- else
- {
- /*
- * System type selectors are marked not present.
- * Recompiler or special handling is required for these.
- */
- /** @todo what about interrupt gates and rawr0? */
- pDesc->Gen.u1Present = 0;
- }
-}
-
-
-/**
- * Checks if a segment register is stale given the shadow descriptor table
- * entry.
- *
- * @returns @c true if stale, @c false if not.
- * @param pSReg The segment register.
- * @param pShwDesc The shadow descriptor entry.
- * @param iSReg The segment register number (X86_SREG_XXX).
- */
-DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg)
-{
- if ( pSReg->Attr.n.u1Present != pShwDesc->Gen.u1Present
- || pSReg->Attr.n.u4Type != pShwDesc->Gen.u4Type
- || pSReg->Attr.n.u1DescType != pShwDesc->Gen.u1DescType
- || pSReg->Attr.n.u1DefBig != pShwDesc->Gen.u1DefBig
- || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity
- || pSReg->Attr.n.u2Dpl != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
- {
- Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc)));
- return true;
- }
-
- if (pSReg->u64Base != X86DESC_BASE(pShwDesc))
- {
- Log(("selmIsSRegStale32: base changed (%#llx -> %#llx)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc)));
- return true;
- }
-
- if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc))
- {
- Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc)));
- return true;
- }
-
- return false;
-}
-
-
-/**
- * Loads the hidden bits of a selector register from a shadow descriptor table
- * entry.
- *
- * @param pSReg The segment register in question.
- * @param pShwDesc The shadow descriptor table entry.
- */
-DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc)
-{
- pSReg->Attr.u = X86DESC_GET_HID_ATTR(pShwDesc);
- pSReg->Attr.n.u2Dpl -= pSReg->Attr.n.u1Available;
- Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
- pSReg->u32Limit = X86DESC_LIMIT_G(pShwDesc);
- pSReg->u64Base = X86DESC_BASE(pShwDesc);
- pSReg->ValidSel = pSReg->Sel;
- if (pSReg->Attr.n.u1Available)
- pSReg->ValidSel &= ~(RTSEL)1;
- pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
-}
-
-
-/**
- * Loads the hidden bits of a selector register from a guest descriptor table
- * entry.
- *
- * @param pVCpu The current virtual CPU.
- * @param pSReg The segment register in question.
- * @param pGstDesc The guest descriptor table entry.
- */
-DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc)
-{
- pSReg->Attr.u = X86DESC_GET_HID_ATTR(pGstDesc);
- pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED;
- pSReg->u32Limit = X86DESC_LIMIT_G(pGstDesc);
- pSReg->u64Base = X86DESC_BASE(pGstDesc);
- pSReg->ValidSel = pSReg->Sel;
- if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu))
- pSReg->ValidSel &= ~(RTSEL)1;
- pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
-}
-
-#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
-
/** @} */
#endif
diff --git a/src/VBox/VMM/include/SSMInternal.h b/src/VBox/VMM/include/SSMInternal.h
index d1f8752a..ccfe3e45 100644
--- a/src/VBox/VMM/include/SSMInternal.h
+++ b/src/VBox/VMM/include/SSMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -199,6 +199,8 @@ typedef struct SSMUNIT
/** The offset of the final data unit.
* This is used for constructing the directory. */
RTFOFF offStream;
+    /** Critical section to be taken before calling any of the callbacks. */
+ PPDMCRITSECT pCritSect;
/** The guessed size of the data unit - used only for progress indication. */
size_t cbGuess;
/** Name size. (bytes) */
diff --git a/src/VBox/VMM/include/STAMInternal.h b/src/VBox/VMM/include/STAMInternal.h
index 62b11bfc..1bfc7290 100644
--- a/src/VBox/VMM/include/STAMInternal.h
+++ b/src/VBox/VMM/include/STAMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,6 +23,7 @@
#include <VBox/vmm/stam.h>
#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
+#include <iprt/list.h>
#include <iprt/semaphore.h>
@@ -35,13 +36,51 @@ RT_C_DECLS_BEGIN
* @{
*/
+/** Enables the lookup tree.
+ * This is an optimization for speeding up registration as well as query. */
+#define STAM_WITH_LOOKUP_TREE
+
+
+/** Pointer to sample descriptor. */
+typedef struct STAMDESC *PSTAMDESC;
+/** Pointer to a sample lookup node. */
+typedef struct STAMLOOKUP *PSTAMLOOKUP;
+
+/**
+ * Sample lookup node.
+ */
+typedef struct STAMLOOKUP
+{
+ /** The parent lookup record. This is NULL for the root node. */
+ PSTAMLOOKUP pParent;
+ /** Array of children (using array for binary searching). */
+ PSTAMLOOKUP *papChildren;
+ /** Pointer to the description node, if any. */
+ PSTAMDESC pDesc;
+    /** Number of descendants with descriptors. (Used for freeing up sub-trees.) */
+ uint32_t cDescsInTree;
+ /** The number of children. */
+ uint16_t cChildren;
+    /** The index in the parent papChildren array. UINT16_MAX for the root node. */
+ uint16_t iParent;
+ /** The path offset. */
+ uint16_t off;
+ /** The size of the path component. */
+ uint16_t cch;
+ /** The name (variable size). */
+ char szName[1];
+} STAMLOOKUP;
+
+
/**
* Sample descriptor.
*/
typedef struct STAMDESC
{
- /** Pointer to the next sample. */
- struct STAMDESC *pNext;
+ /** Our entry in the big linear list. */
+ RTLISTNODE ListEntry;
+ /** Pointer to our lookup node. */
+ PSTAMLOOKUP pLookup;
/** Sample name. */
const char *pszName;
/** Sample type. */
@@ -87,10 +126,6 @@ typedef struct STAMDESC
/** Description. */
const char *pszDesc;
} STAMDESC;
-/** Pointer to sample descriptor. */
-typedef STAMDESC *PSTAMDESC;
-/** Pointer to const sample descriptor. */
-typedef const STAMDESC *PCSTAMDESC;
/**
@@ -98,9 +133,12 @@ typedef const STAMDESC *PCSTAMDESC;
*/
typedef struct STAMUSERPERVM
{
- /** Pointer to the first sample. */
- R3PTRTYPE(PSTAMDESC) pHead;
- /** RW Lock for the list. */
+ /** List of samples. */
+ RTLISTANCHOR List;
+ /** Root of the lookup tree. */
+ PSTAMLOOKUP pRoot;
+
+ /** RW Lock for the list and tree. */
RTSEMRW RWSem;
/** The copy of the GVMM statistics. */
@@ -113,7 +151,9 @@ typedef struct STAMUSERPERVM
/** The copy of the GMM statistics. */
GMMSTATS GMMStats;
} STAMUSERPERVM;
+#ifdef IN_RING3
AssertCompileMemberAlignment(STAMUSERPERVM, GMMStats, 8);
+#endif
/** Pointer to the STAM data kept in the UVM. */
typedef STAMUSERPERVM *PSTAMUSERPERVM;
@@ -127,6 +167,8 @@ typedef STAMUSERPERVM *PSTAMUSERPERVM;
#define STAM_UNLOCK_RD(pUVM) do { int rcSem = RTSemRWReleaseRead(pUVM->stam.s.RWSem); AssertRC(rcSem); } while (0)
/** UnLocks the sample descriptors after writing. */
#define STAM_UNLOCK_WR(pUVM) do { int rcSem = RTSemRWReleaseWrite(pUVM->stam.s.RWSem); AssertRC(rcSem); } while (0)
+/** Lazy initialization */
+#define STAM_LAZY_INIT(pUVM) do { } while (0)
/** @} */
diff --git a/src/VBox/VMM/include/TMInternal.h b/src/VBox/VMM/include/TMInternal.h
index 06894900..306a4b85 100644
--- a/src/VBox/VMM/include/TMInternal.h
+++ b/src/VBox/VMM/include/TMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/include/TRPMInternal.h b/src/VBox/VMM/include/TRPMInternal.h
index 71973901..90a42192 100644
--- a/src/VBox/VMM/include/TRPMInternal.h
+++ b/src/VBox/VMM/include/TRPMInternal.h
@@ -26,7 +26,9 @@
/** Enable to allow trap forwarding in GC. */
-#define TRPM_FORWARD_TRAPS_IN_GC
+#ifdef VBOX_WITH_RAW_MODE
+# define TRPM_FORWARD_TRAPS_IN_GC
+#endif
/** First interrupt handler. Used for validating input. */
#define TRPM_HANDLER_INT_BASE 0x20
@@ -93,9 +95,6 @@ typedef struct TRPM
* See TRPM2TRPMCPU(). */
RTINT offTRPMCPU;
- /** IDT monitoring and sync flag (HWACC). */
- bool fDisableMonitoring; /** @todo r=bird: bool and 7 byte achPadding1. */
-
/** Whether monitoring of the guest IDT is enabled or not.
*
* This configuration option is provided for speeding up guest like Solaris
@@ -111,7 +110,7 @@ typedef struct TRPM
bool fSafeToDropGuestIDTMonitoring;
/** Padding to get the IDTs at a 16 byte alignment. */
- uint8_t abPadding1[6];
+ uint8_t abPadding1[7];
/** IDTs. Aligned at 16 byte offset for speed. */
VBOXIDTE aIdt[256];
@@ -156,8 +155,6 @@ typedef struct TRPM
/** Statistics for interrupt handlers (allocated on the hypervisor heap) - R3
* pointer. */
R3PTRTYPE(PSTAMCOUNTER) paStatForwardedIRQR3;
- /** Statistics for interrupt handlers - R0 pointer. */
- R0PTRTYPE(PSTAMCOUNTER) paStatForwardedIRQR0;
/** Statistics for interrupt handlers - RC pointer. */
RCPTRTYPE(PSTAMCOUNTER) paStatForwardedIRQRC;
@@ -165,8 +162,6 @@ typedef struct TRPM
RCPTRTYPE(PSTAMCOUNTER) paStatHostIrqRC;
/** Host interrupt statistics (allocated on the hypervisor heap) - R3 ptr. */
R3PTRTYPE(PSTAMCOUNTER) paStatHostIrqR3;
- /** Host interrupt statistics (allocated on the hypervisor heap) - R0 ptr. */
- R0PTRTYPE(PSTAMCOUNTER) paStatHostIrqR0;
#endif
} TRPM;
@@ -231,6 +226,16 @@ typedef struct TRPMCPU
/** Previous trap vector # - for debugging. */
RTGCUINT uPrevVector;
+
+ /** Instruction length for software interrupts and software exceptions (#BP,
+ * #OF) */
+ uint8_t cbInstr;
+
+ /** Saved instruction length. */
+ uint8_t cbSavedInstr;
+
+ /** Padding. */
+ uint8_t au8Padding[2];
} TRPMCPU;
/** Pointer to TRPMCPU Data. */
@@ -253,16 +258,7 @@ VMMDECL(int) trpmClearGuestTrapHandler(PVM pVM, unsigned iTrap);
#ifdef IN_RING3
-
-/**
- * Clear passthrough interrupt gate handler (reset to default handler)
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param iTrap Trap/interrupt gate number.
- */
-VMMR3DECL(int) trpmR3ClearPassThroughHandler(PVM pVM, unsigned iTrap);
-
+int trpmR3ClearPassThroughHandler(PVM pVM, unsigned iTrap);
#endif
diff --git a/src/VBox/VMM/include/TRPMInternal.mac b/src/VBox/VMM/include/TRPMInternal.mac
index ba8d7d8e..6228db95 100644
--- a/src/VBox/VMM/include/TRPMInternal.mac
+++ b/src/VBox/VMM/include/TRPMInternal.mac
@@ -29,9 +29,8 @@
struc TRPM
.offVM resd 1
.offTRPMCPU resd 1
- .fDisableMonitoring resb 1
.fSafeToDropGuestIDTMonitoring resb 1
- .abPadding1 resb 6
+ .abPadding1 resb 7
.aIdt resd 512
.au32IdtPatched resd 8
.aTmpTrapHandlers RTRCPTR_RES 256
@@ -59,11 +58,9 @@ struc TRPM
%ifdef VBOX_WITH_STATISTICS
.paStatForwardedIRQR3 RTR3PTR_RES 1
- .paStatForwardedIRQR0 RTR0PTR_RES 1
.paStatForwardedIRQRC RTRCPTR_RES 1
.paStatHostIrqRC RTRCPTR_RES 1
.paStatHostIrqR3 RTR3PTR_RES 1
- .paStatHostIrqR0 RTR0PTR_RES 1
%endif
endstruc
@@ -79,6 +76,9 @@ struc TRPMCPU
.uSavedErrorCode RTGCPTR_RES 1
.uSavedCR2 RTGCPTR_RES 1
.uPrevVector RTGCPTR_RES 1
+ .cbInstr resb 1
+ .cbSavedInstr resb 1
+ .au8Padding resb 2
endstruc
struc VBOXTSS
diff --git a/src/VBox/VMM/include/VMInternal.h b/src/VBox/VMM/include/VMInternal.h
index 3a1a3ffe..fa42fb18 100644
--- a/src/VBox/VMM/include/VMInternal.h
+++ b/src/VBox/VMM/include/VMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -163,6 +163,8 @@ typedef struct VMINT
R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
/** The VM was/is-being teleported and has not yet been fully resumed. */
bool fTeleportedAndNotFullyResumedYet;
+ /** The VM should power off instead of reset. */
+ bool fPowerOffInsteadOfReset;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;
@@ -228,6 +230,11 @@ typedef struct VMINTUSERPERVM
* and when debugging. */
VMSTATE enmPrevVMState;
+ /** Reason for the most recent suspend operation. */
+ VMSUSPENDREASON enmSuspendReason;
+    /** Reason for the most recent resume operation. */
+ VMRESUMEREASON enmResumeReason;
+
/** Critical section for pAtError and pAtRuntimeError. */
RTCRITSECT AtErrorCritSect;
diff --git a/src/VBox/VMM/include/VMMInternal.h b/src/VBox/VMM/include/VMMInternal.h
index 38b45a72..fa3f253d 100644
--- a/src/VBox/VMM/include/VMMInternal.h
+++ b/src/VBox/VMM/include/VMMInternal.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -21,10 +21,10 @@
#include <VBox/cdefs.h>
#include <VBox/sup.h>
#include <VBox/vmm/stam.h>
+#include <VBox/vmm/vmm.h>
#include <VBox/log.h>
#include <iprt/critsect.h>
-
#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
# error "Not in VMM! This is an internal header!"
#endif
@@ -47,7 +47,7 @@
* so you have to sign up here by adding your defined(DEBUG_<userid>) to the
* #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
*/
-#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DOXYGEN_RUNNING)
+#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif
@@ -216,19 +216,18 @@ typedef struct VMM
RTR0PTR pvCoreCodeR0;
/** Pointer to core code guest context mapping. */
RTRCPTR pvCoreCodeRC;
- RTRCPTR pRCPadding0; /**< Alignment padding */
+ RTRCPTR pRCPadding0; /**< Alignment padding. */
#ifdef VBOX_WITH_NMI
/** The guest context address of the APIC (host) mapping. */
RTRCPTR GCPtrApicBase;
- RTRCPTR pRCPadding1; /**< Alignment padding */
+ RTRCPTR pRCPadding1; /**< Alignment padding. */
#endif
/** The current switcher.
* This will be set before the VMM is fully initialized. */
VMMSWITCHER enmSwitcher;
- /** Flag to disable the switcher permanently (VMX) (boolean) */
- bool fSwitcherDisabled;
/** Array of offsets to the different switchers within the core code. */
- RTUINT aoffSwitchers[VMMSWITCHER_MAX];
+ uint32_t aoffSwitchers[VMMSWITCHER_MAX];
+ uint32_t u32Padding2; /**< Alignment padding. */
/** Resume Guest Execution. See CPUMGCResumeGuest(). */
RTRCPTR pfnCPUMRCResumeGuest;
@@ -418,6 +417,16 @@ typedef struct VMMCPU
* This is NULL if logging is disabled. */
R0PTRTYPE(PVMMR0LOGGER) pR0LoggerR0;
+ /** @name Thread-context hooks.
+ * @{*/
+ R0PTRTYPE(RTTHREADCTX) hR0ThreadCtx;
+#if HC_ARCH_BITS == 32
+ uint32_t u32Padding;
+#else
+ uint64_t u64Padding;
+#endif
+ /** @} */
+
/** @name Rendezvous
* @{ */
/** Whether the EMT is executing a rendezvous right now. For detecting
@@ -431,7 +440,7 @@ typedef struct VMMCPU
SUPDRVTRACERUSRCTX TracerCtx;
/** @} */
- /** Alignment padding, making sure u64CallRing3Arg is nicly aligned. */
+ /** Alignment padding, making sure u64CallRing3Arg is nicely aligned. */
uint32_t au32Padding1[3];
/** @name Call Ring-3
@@ -445,12 +454,15 @@ typedef struct VMMCPU
int32_t rcCallRing3;
/** The argument to the operation. */
uint64_t u64CallRing3Arg;
+ /** The Ring-0 notification callback. */
+ R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallRing3CallbackR0;
+ /** The Ring-0 notification callback user argument. */
+ R0PTRTYPE(void *) pvCallRing3CallbackUserR0;
/** The Ring-0 jmp buffer.
* @remarks The size of this type isn't stable in assembly, so don't put
* anything that needs to be accessed from assembly after it. */
VMMR0JMPBUF CallRing3JmpBufR0;
/** @} */
-
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/** Pointer to VMMCPU. */
@@ -506,17 +518,35 @@ typedef enum VMMGCOPERATION
/** Testcase for checking interrupt masking.. */
VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
/** Switching testing and profiling stub. */
- VMMGC_DO_TESTCASE_HWACCM_NOP,
+ VMMGC_DO_TESTCASE_HM_NOP,
/** The usual 32-bit hack. */
VMMGC_DO_32_BIT_HACK = 0x7fffffff
} VMMGCOPERATION;
+
+/**
+ * MSR test result entry.
+ */
+typedef struct VMMTESTMSRENTRY
+{
+ /** The MSR number, including padding.
+ * Set to UINT64_MAX if invalid MSR. */
+ uint64_t uMsr;
+ /** The register value. */
+ uint64_t uValue;
+} VMMTESTMSRENTRY;
+/** Pointer to an MSR test result entry. */
+typedef VMMTESTMSRENTRY *PVMMTESTMSRENTRY;
+
+
+
RT_C_DECLS_BEGIN
int vmmInitFormatTypes(void);
void vmmTermFormatTypes(void);
+uint32_t vmmGetBuildType(void);
#ifdef IN_RING3
int vmmR3SwitcherInit(PVM pVM);
diff --git a/src/VBox/VMM/include/VMMInternal.mac b/src/VBox/VMM/include/VMMInternal.mac
index e306790d..63186712 100644
--- a/src/VBox/VMM/include/VMMInternal.mac
+++ b/src/VBox/VMM/include/VMMInternal.mac
@@ -4,7 +4,7 @@
;
;
-; Copyright (C) 2006-2009 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -107,6 +107,13 @@ struc VMMCPU
.pR0LoggerR3 RTR3PTR_RES 1
.pR0LoggerR0 RTR0PTR_RES 1
+ .hR0ThreadCtx RTR0PTR_RES 1
+%if HC_ARCH_BITS == 32
+ .u32Padding resd 1
+%else
+ .u64Padding resq 1
+%endif
+
.fInRendezvous resb 1
%if HC_ARCH_BITS == 32
.afPadding resb 3
@@ -123,7 +130,8 @@ struc VMMCPU
.enmCallRing3Operation resd 1
.rcCallRing3 resd 1
.u64CallRing3Arg resq 1
+ .pfnCallRing3CallbackR0 RTR0PTR_RES 1
+ .pvCallRing3CallbackUserR0 RTR0PTR_RES 1
; .CallRing3JmpBufR0 resb no-can-do
-
endstruc
diff --git a/src/VBox/VMM/include/VMMSwitcher.h b/src/VBox/VMM/include/VMMSwitcher.h
index 19fe1507..a46e9d99 100644
--- a/src/VBox/VMM/include/VMMSwitcher.h
+++ b/src/VBox/VMM/include/VMMSwitcher.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -127,12 +127,14 @@ typedef struct VMMSWITCHERDEF
#pragma pack()
RT_C_DECLS_BEGIN
+extern VMMSWITCHERDEF vmmR3SwitcherX86Stub_Def;
extern VMMSWITCHERDEF vmmR3Switcher32BitTo32Bit_Def;
extern VMMSWITCHERDEF vmmR3Switcher32BitToPAE_Def;
extern VMMSWITCHERDEF vmmR3Switcher32BitToAMD64_Def;
extern VMMSWITCHERDEF vmmR3SwitcherPAETo32Bit_Def;
extern VMMSWITCHERDEF vmmR3SwitcherPAEToPAE_Def;
extern VMMSWITCHERDEF vmmR3SwitcherPAEToAMD64_Def;
+extern VMMSWITCHERDEF vmmR3SwitcherAMD64Stub_Def;
extern VMMSWITCHERDEF vmmR3SwitcherAMD64To32Bit_Def;
extern VMMSWITCHERDEF vmmR3SwitcherAMD64ToPAE_Def;
extern VMMSWITCHERDEF vmmR3SwitcherAMD64ToAMD64_Def;
diff --git a/src/VBox/VMM/include/VMMSwitcher.mac b/src/VBox/VMM/include/VMMSwitcher.mac
index dbb4ed70..ec861c6d 100644
--- a/src/VBox/VMM/include/VMMSwitcher.mac
+++ b/src/VBox/VMM/include/VMMSwitcher.mac
@@ -4,7 +4,7 @@
;
;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -31,9 +31,8 @@
%define VMMSWITCHER_AMD64_TO_32 7
%define VMMSWITCHER_AMD64_TO_PAE 8
%define VMMSWITCHER_AMD64_TO_AMD64 9
-;; @todo the rest are all wrong. sync with vmm.h.
-%define VMMSWITCHER_HOST_TO_VMX 9
-%define VMMSWITCHER_HOST_TO_SVM 10
+%define VMMSWITCHER_X86_STUB 10
+%define VMMSWITCHER_AMD64_STUB 11
%define VMMSWITCHER_MAX 12
; }
@@ -128,11 +127,19 @@ BEGINCODE
;%define DEBUG_STUFF 1
%ifdef DEBUG_STUFF
- %define DEBUG_CHAR(ch) COM_CHAR ch
- %define DEBUG_S_CHAR(ch) COM_S_CHAR ch
+ %define DEBUG_CHAR(ch) COM_CHAR ch
+ %define DEBUG32_CHAR(ch) COM_CHAR ch
+ %define DEBUG64_CHAR(ch) COM_CHAR ch
+ %define DEBUG_S_CHAR(ch) COM_S_CHAR ch
+ %define DEBUG32_S_CHAR(ch) COM32_S_CHAR ch
+ %define DEBUG64_S_CHAR(ch) COM64_S_CHAR ch
%else
%define DEBUG_CHAR(ch)
+ %define DEBUG32_CHAR(ch)
+ %define DEBUG64_CHAR(ch)
%define DEBUG_S_CHAR(ch)
+ %define DEBUG32_S_CHAR(ch)
+ %define DEBUG64_S_CHAR(ch)
%endif
%endif ; !___VMMSwitcher_mac
diff --git a/src/VBox/VMM/include/internal/em.h b/src/VBox/VMM/include/internal/em.h
deleted file mode 100644
index 2d63f0b0..00000000
--- a/src/VBox/VMM/include/internal/em.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* $Id: em.h $ */
-/** @file
- * EM - Internal VMM header file.
- */
-
-/*
- * Copyright (C) 2006-2010 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-#ifndef ___EM_include_internal_h
-#define ___EM_include_internal_h
-
-#include <VBox/vmm/em.h>
-
-VMMR3DECL(int) EMR3NotifyResume(PVM pVM);
-VMMR3DECL(int) EMR3NotifySuspend(PVM pVM);
-
-VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu);
-
-#endif
diff --git a/src/VBox/VMM/include/internal/pgm.h b/src/VBox/VMM/include/internal/pgm.h
index 207050e4..55cb4945 100644
--- a/src/VBox/VMM/include/internal/pgm.h
+++ b/src/VBox/VMM/include/internal/pgm.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -45,6 +45,11 @@ typedef enum PGMPAGETYPE
/** MMIO2 page aliased over an MMIO page. (RWX)
* See PGMHandlerPhysicalPageAlias(). */
PGMPAGETYPE_MMIO2_ALIAS_MMIO,
+ /** Special page aliased over an MMIO page. (RWX)
+ * See PGMHandlerPhysicalPageAliasHC(), but this is generally only used for
+ * VT-x's APIC access page at the moment. Treated as MMIO by everyone except
+ * the shadow paging code. */
+ PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
/** Shadowed ROM. (RWX) */
PGMPAGETYPE_ROM_SHADOW,
/** ROM page. (R-X) */
@@ -54,7 +59,7 @@ typedef enum PGMPAGETYPE
/** End of valid entries. */
PGMPAGETYPE_END
} PGMPAGETYPE;
-AssertCompile(PGMPAGETYPE_END <= 7);
+AssertCompile(PGMPAGETYPE_END == 8);
VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys);
diff --git a/src/VBox/VMM/include/internal/vm.h b/src/VBox/VMM/include/internal/vm.h
deleted file mode 100644
index a3bc6ce6..00000000
--- a/src/VBox/VMM/include/internal/vm.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* $Id: vm.h $ */
-/** @file
- * VM - Internal VMM header file.
- */
-
-/*
- * Copyright (C) 2006-2010 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-#ifndef ___VM_include_internal_h
-#define ___VM_include_internal_h
-
-#include <VBox/vmm/vm.h>
-
-VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended, bool fSkipStateChanges);
-VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser);
-
-#endif