summaryrefslogtreecommitdiff
path: root/src/VBox/VMM/VMMR3
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@baserock.org>2014-03-26 19:21:20 +0000
committer <>2014-05-08 15:03:54 +0000
commitfb123f93f9f5ce42c8e5785d2f8e0edaf951740e (patch)
treec2103d76aec5f1f10892cd1d3a38e24f665ae5db /src/VBox/VMM/VMMR3
parent58ed4748338f9466599adfc8a9171280ed99e23f (diff)
downloadVirtualBox-master.tar.gz
Imported from /home/lorry/working-area/delta_VirtualBox/VirtualBox-4.3.10.tar.bz2.HEADVirtualBox-4.3.10master
Diffstat (limited to 'src/VBox/VMM/VMMR3')
-rw-r--r--src/VBox/VMM/VMMR3/CFGM.cpp243
-rw-r--r--src/VBox/VMM/VMMR3/CPUM.cpp1518
-rw-r--r--src/VBox/VMM/VMMR3/CPUMDbg.cpp62
-rw-r--r--src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp1355
-rw-r--r--src/VBox/VMM/VMMR3/CPUMR3Db.cpp780
-rw-r--r--src/VBox/VMM/VMMR3/CSAM.cpp263
-rw-r--r--src/VBox/VMM/VMMR3/DBGF.cpp298
-rw-r--r--src/VBox/VMM/VMMR3/DBGFAddr.cpp143
-rw-r--r--src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp750
-rw-r--r--src/VBox/VMM/VMMR3/DBGFBp.cpp340
-rw-r--r--src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp24
-rw-r--r--src/VBox/VMM/VMMR3/DBGFCpu.cpp73
-rw-r--r--src/VBox/VMM/VMMR3/DBGFDisas.cpp252
-rw-r--r--src/VBox/VMM/VMMR3/DBGFInfo.cpp308
-rw-r--r--src/VBox/VMM/VMMR3/DBGFLog.cpp118
-rw-r--r--src/VBox/VMM/VMMR3/DBGFMem.cpp175
-rw-r--r--src/VBox/VMM/VMMR3/DBGFModule.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/DBGFOS.cpp168
-rw-r--r--src/VBox/VMM/VMMR3/DBGFR3Trace.cpp4
-rw-r--r--src/VBox/VMM/VMMR3/DBGFReg.cpp598
-rw-r--r--src/VBox/VMM/VMMR3/DBGFStack.cpp101
-rw-r--r--src/VBox/VMM/VMMR3/DBGFSym.cpp1131
-rw-r--r--src/VBox/VMM/VMMR3/EM.cpp656
-rw-r--r--src/VBox/VMM/VMMR3/EMHM.cpp (renamed from src/VBox/VMM/VMMR3/EMHwaccm.cpp)366
-rw-r--r--src/VBox/VMM/VMMR3/EMR3Dbg.cpp74
-rw-r--r--src/VBox/VMM/VMMR3/EMRaw.cpp315
-rw-r--r--src/VBox/VMM/VMMR3/FTM.cpp127
-rw-r--r--src/VBox/VMM/VMMR3/HM.cpp3169
-rw-r--r--src/VBox/VMM/VMMR3/HWACCM.cpp2935
-rw-r--r--src/VBox/VMM/VMMR3/IEMR3.cpp60
-rw-r--r--src/VBox/VMM/VMMR3/IOM.cpp318
-rw-r--r--src/VBox/VMM/VMMR3/MM.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/MMHeap.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/MMHyper.cpp17
-rw-r--r--src/VBox/VMM/VMMR3/MMPagePool.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/MMUkHeap.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/PATM.cpp511
-rw-r--r--src/VBox/VMM/VMMR3/PATMA.asm302
-rw-r--r--src/VBox/VMM/VMMR3/PATMA.mac2
-rw-r--r--src/VBox/VMM/VMMR3/PATMGuest.cpp6
-rw-r--r--src/VBox/VMM/VMMR3/PATMPatch.cpp70
-rw-r--r--src/VBox/VMM/VMMR3/PATMPatch.h2
-rw-r--r--src/VBox/VMM/VMMR3/PATMR3Dbg.cpp404
-rw-r--r--src/VBox/VMM/VMMR3/PATMSSM.cpp210
-rw-r--r--src/VBox/VMM/VMMR3/PDM.cpp170
-rw-r--r--src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp736
-rw-r--r--src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp156
-rw-r--r--src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp2
-rw-r--r--src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp67
-rw-r--r--src/VBox/VMM/VMMR3/PDMBlkCache.cpp19
-rw-r--r--src/VBox/VMM/VMMR3/PDMCritSect.cpp529
-rw-r--r--src/VBox/VMM/VMMR3/PDMDevHlp.cpp282
-rw-r--r--src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp137
-rw-r--r--src/VBox/VMM/VMMR3/PDMDevice.cpp50
-rw-r--r--src/VBox/VMM/VMMR3/PDMDriver.cpp63
-rw-r--r--src/VBox/VMM/VMMR3/PDMLdr.cpp98
-rw-r--r--src/VBox/VMM/VMMR3/PDMNetShaper.cpp319
-rw-r--r--src/VBox/VMM/VMMR3/PDMQueue.cpp45
-rw-r--r--src/VBox/VMM/VMMR3/PDMThread.cpp5
-rw-r--r--src/VBox/VMM/VMMR3/PDMUsb.cpp235
-rw-r--r--src/VBox/VMM/VMMR3/PGM.cpp332
-rw-r--r--src/VBox/VMM/VMMR3/PGMBth.h56
-rw-r--r--src/VBox/VMM/VMMR3/PGMDbg.cpp137
-rw-r--r--src/VBox/VMM/VMMR3/PGMGst.h19
-rw-r--r--src/VBox/VMM/VMMR3/PGMHandler.cpp31
-rw-r--r--src/VBox/VMM/VMMR3/PGMMap.cpp100
-rw-r--r--src/VBox/VMM/VMMR3/PGMPhys.cpp130
-rw-r--r--src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h2
-rw-r--r--src/VBox/VMM/VMMR3/PGMPool.cpp117
-rw-r--r--src/VBox/VMM/VMMR3/PGMSavedState.cpp98
-rw-r--r--src/VBox/VMM/VMMR3/PGMSharedPage.cpp29
-rw-r--r--src/VBox/VMM/VMMR3/PGMShw.h32
-rw-r--r--src/VBox/VMM/VMMR3/SELM.cpp598
-rw-r--r--src/VBox/VMM/VMMR3/SSM.cpp416
-rw-r--r--src/VBox/VMM/VMMR3/STAM.cpp1222
-rw-r--r--src/VBox/VMM/VMMR3/TM.cpp137
-rw-r--r--src/VBox/VMM/VMMR3/TRPM.cpp295
-rw-r--r--src/VBox/VMM/VMMR3/VM.cpp1037
-rw-r--r--src/VBox/VMM/VMMR3/VMEmt.cpp72
-rw-r--r--src/VBox/VMM/VMMR3/VMM.cpp224
-rw-r--r--src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp37
-rw-r--r--src/VBox/VMM/VMMR3/VMMR3.def118
-rw-r--r--src/VBox/VMM/VMMR3/VMMSwitcher.cpp287
-rw-r--r--src/VBox/VMM/VMMR3/VMMTests.cpp421
-rw-r--r--src/VBox/VMM/VMMR3/VMReq.cpp178
-rw-r--r--src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h220
-rw-r--r--src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h188
-rw-r--r--src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h379
-rw-r--r--src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h268
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h335
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h326
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h383
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h365
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h273
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h212
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h241
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Makefile.kup0
-rw-r--r--src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h266
-rw-r--r--src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h400
99 files changed, 21053 insertions, 10099 deletions
diff --git a/src/VBox/VMM/VMMR3/CFGM.cpp b/src/VBox/VMM/VMMR3/CFGM.cpp
index b0dcb508..4420e89f 100644
--- a/src/VBox/VMM/VMMR3/CFGM.cpp
+++ b/src/VBox/VMM/VMMR3/CFGM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -33,9 +33,9 @@
* where they are protected from accessing information of any parents. This is
* is implemented via the CFGMR3SetRestrictedRoot() API.
*
- * Data validation out over the basic primitives is left to the caller. The
- * caller is in a better position to know the proper validation rules of the
- * individual properties.
+ * Data validation beyond the basic primitives is left to the caller. The caller
+ * is in a better position to know the proper validation rules of the individual
+ * properties.
*
* @see grp_cfgm
*
@@ -60,10 +60,13 @@
#include <VBox/vmm/mm.h>
#include "CFGMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/uuid.h>
@@ -78,7 +81,93 @@ static int cfgmR3ResolveNode(PCFGMNODE pNode, const char *pszPath, PCFGMNODE *p
static int cfgmR3ResolveLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf);
static int cfgmR3InsertLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppLeaf);
static void cfgmR3RemoveLeaf(PCFGMNODE pNode, PCFGMLEAF pLeaf);
-static void cfgmR3FreeValue(PCFGMLEAF pLeaf);
+static void cfgmR3FreeValue(PVM pVM, PCFGMLEAF pLeaf);
+
+
+/**
+ * Allocator wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The VM handle, if tree associated with one.
+ * @param enmTag The allocation tag.
+ * @param cb The size of the allocation.
+ */
+static void *cfgmR3MemAlloc(PVM pVM, MMTAG enmTag, size_t cb)
+{
+ if (pVM)
+ return MMR3HeapAlloc(pVM, enmTag, cb);
+ return RTMemAlloc(cb);
+}
+
+
+/**
+ * Free wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The VM handle, if tree associated with one.
+ * @param pv The memory block to free.
+ */
+static void cfgmR3MemFree(PVM pVM, void *pv)
+{
+ if (pVM)
+ MMR3HeapFree(pv);
+ else
+ RTMemFree(pv);
+}
+
+
+/**
+ * String allocator wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The VM handle, if tree associated with one.
+ * @param enmTag The allocation tag.
+ * @param cbString The size of the allocation, terminator included.
+ */
+static char *cfgmR3StrAlloc(PVM pVM, MMTAG enmTag, size_t cbString)
+{
+ if (pVM)
+ return (char *)MMR3HeapAlloc(pVM, enmTag, cbString);
+ return (char *)RTStrAlloc(cbString);
+}
+
+
+/**
+ * String free wrapper.
+ *
+ * @returns Pointer to the allocated memory, NULL on failure.
+ * @param pVM The VM handle, if tree associated with one.
+ * @param pszString The memory block to free.
+ */
+static void cfgmR3StrFree(PVM pVM, char *pszString)
+{
+ if (pVM)
+ MMR3HeapFree(pszString);
+ else
+ RTStrFree(pszString);
+}
+
+
+/**
+ * Frees one node, leaving any children or leaves to the caller.
+ *
+ * @param pNode The node structure to free.
+ */
+static void cfgmR3FreeNodeOnly(PCFGMNODE pNode)
+{
+ pNode->pFirstLeaf = NULL;
+ pNode->pFirstChild = NULL;
+ pNode->pNext = NULL;
+ pNode->pPrev = NULL;
+ if (!pNode->pVM)
+ RTMemFree(pNode);
+ else
+ {
+ pNode->pVM = NULL;
+ MMR3HeapFree(pNode);
+ }
+}
+
@@ -91,6 +180,7 @@ static void cfgmR3FreeValue(PCFGMLEAF pLeaf);
* This is called in the EM.
* @param pvUser The user argument passed to pfnCFGMConstructor.
* @thread EMT.
+ * @internal
*/
VMMR3DECL(int) CFGMR3Init(PVM pVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUser)
{
@@ -104,7 +194,8 @@ VMMR3DECL(int) CFGMR3Init(PVM pVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *
/*
* Register DBGF into item.
*/
- int rc = DBGFR3InfoRegisterInternal(pVM, "cfgm", "Dumps a part of the CFGM tree. The argument indicates where to start.", cfgmR3Info);
+ int rc = DBGFR3InfoRegisterInternal(pVM, "cfgm", "Dumps a part of the CFGM tree. The argument indicates where to start.",
+ cfgmR3Info);
AssertRCReturn(rc,rc);
/*
@@ -121,7 +212,7 @@ VMMR3DECL(int) CFGMR3Init(PVM pVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *
* Call the constructor if specified, if not use the default one.
*/
if (pfnCFGMConstructor)
- rc = pfnCFGMConstructor(pVM, pvUser);
+ rc = pfnCFGMConstructor(pVM->pUVM, pVM, pvUser);
else
rc = CFGMR3ConstructDefaultTree(pVM);
if (RT_SUCCESS(rc))
@@ -141,6 +232,7 @@ VMMR3DECL(int) CFGMR3Init(PVM pVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
+ * @internal
*/
VMMR3DECL(int) CFGMR3Term(PVM pVM)
{
@@ -163,6 +255,21 @@ VMMR3DECL(PCFGMNODE) CFGMR3GetRoot(PVM pVM)
/**
+ * Gets the root node for the VM.
+ *
+ * @returns Pointer to root node.
+ * @param pVM Pointer to the VM.
+ */
+VMMR3DECL(PCFGMNODE) CFGMR3GetRootU(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ PVM pVM = pUVM->pVM;
+ AssertReturn(pVM, NULL);
+ return pVM->cfgm.s.pRoot;
+}
+
+
+/**
* Gets the parent of a CFGM node.
*
* @returns Pointer to the parent node.
@@ -820,6 +927,7 @@ VMMR3DECL(int) CFGMR3ValidateConfig(PCFGMNODE pNode, const char *pszNode,
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
+ * @internal
*/
VMMR3DECL(int) CFGMR3ConstructDefaultTree(PVM pVM)
{
@@ -1179,12 +1287,27 @@ static int cfgmR3ResolveLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *pp
* passed around and later attached to the main tree in the
* correct location.
*
- * @returns Pointer to the root node.
- * @param pVM Pointer to the VM.
+ * @returns Pointer to the root node, NULL on error (out of memory or invalid
+ * VM handle).
+ * @param pUVM The user mode VM handle. For testcase (and other
+ * purposes, NULL can be used. However, the resulting
+ * tree cannot be inserted into a tree that has a
+ * non-NULL value. Using NULL can be usedful for
+ * testcases and similar, non VMM uses.
*/
-VMMR3DECL(PCFGMNODE) CFGMR3CreateTree(PVM pVM)
+VMMR3DECL(PCFGMNODE) CFGMR3CreateTree(PUVM pUVM)
{
- PCFGMNODE pNew = (PCFGMNODE)MMR3HeapAlloc(pVM, MM_TAG_CFGM, sizeof(*pNew));
+ if (pUVM)
+ {
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ }
+
+ PCFGMNODE pNew;
+ if (pUVM)
+ pNew = (PCFGMNODE)MMR3HeapAllocU(pUVM, MM_TAG_CFGM, sizeof(*pNew));
+ else
+ pNew = (PCFGMNODE)RTMemAlloc(sizeof(*pNew));
if (pNew)
{
pNew->pPrev = NULL;
@@ -1192,7 +1315,7 @@ VMMR3DECL(PCFGMNODE) CFGMR3CreateTree(PVM pVM)
pNew->pParent = NULL;
pNew->pFirstChild = NULL;
pNew->pFirstLeaf = NULL;
- pNew->pVM = pVM;
+ pNew->pVM = pUVM ? pUVM->pVM : NULL;
pNew->fRestrictedRoot = false;
pNew->cchName = 0;
pNew->szName[0] = 0;
@@ -1216,7 +1339,7 @@ VMMR3DECL(int) CFGMR3DuplicateSubTree(PCFGMNODE pRoot, PCFGMNODE *ppCopy)
/*
* Create a new tree.
*/
- PCFGMNODE pNewRoot = CFGMR3CreateTree(pRoot->pVM);
+ PCFGMNODE pNewRoot = CFGMR3CreateTree(pRoot->pVM ? pRoot->pVM->pUVM : NULL);
if (!pNewRoot)
return VERR_NO_MEMORY;
@@ -1326,7 +1449,7 @@ VMMR3DECL(int) CFGMR3InsertSubTree(PCFGMNODE pNode, const char *pszName, PCFGMNO
AssertPtrReturn(pSubTree, VERR_INVALID_POINTER);
AssertReturn(pNode != pSubTree, VERR_INVALID_PARAMETER);
AssertReturn(!pSubTree->pParent, VERR_INVALID_PARAMETER);
- AssertReturn(pSubTree->pVM, VERR_INVALID_PARAMETER);
+ AssertReturn(pNode->pVM == pSubTree->pVM, VERR_INVALID_PARAMETER);
Assert(!pSubTree->pNext);
Assert(!pSubTree->pPrev);
@@ -1350,10 +1473,7 @@ VMMR3DECL(int) CFGMR3InsertSubTree(PCFGMNODE pNode, const char *pszName, PCFGMNO
*ppChild = pNewChild;
/* free the old subtree root */
- pSubTree->pVM = NULL;
- pSubTree->pFirstLeaf = NULL;
- pSubTree->pFirstChild = NULL;
- MMR3HeapFree(pSubTree);
+ cfgmR3FreeNodeOnly(pSubTree);
}
return rc;
}
@@ -1381,7 +1501,6 @@ VMMR3DECL(int) CFGMR3ReplaceSubTree(PCFGMNODE pRoot, PCFGMNODE pNewRoot)
AssertPtrReturn(pNewRoot, VERR_INVALID_POINTER);
AssertReturn(pRoot != pNewRoot, VERR_INVALID_PARAMETER);
AssertReturn(!pNewRoot->pParent, VERR_INVALID_PARAMETER);
- AssertReturn(pNewRoot->pVM, VERR_INVALID_PARAMETER);
AssertReturn(pNewRoot->pVM == pRoot->pVM, VERR_INVALID_PARAMETER);
AssertReturn(!pNewRoot->pNext, VERR_INVALID_PARAMETER);
AssertReturn(!pNewRoot->pPrev, VERR_INVALID_PARAMETER);
@@ -1403,10 +1522,7 @@ VMMR3DECL(int) CFGMR3ReplaceSubTree(PCFGMNODE pRoot, PCFGMNODE pNewRoot)
for (PCFGMNODE pChild = pRoot->pFirstChild; pChild; pChild = pChild->pNext)
pChild->pParent = pRoot;
- pNewRoot->pFirstLeaf = NULL;
- pNewRoot->pFirstChild = NULL;
- pNewRoot->pVM = NULL;
- MMR3HeapFree(pNewRoot);
+ cfgmR3FreeNodeOnly(pNewRoot);
return VINF_SUCCESS;
}
@@ -1618,7 +1734,7 @@ VMMR3DECL(int) CFGMR3InsertNode(PCFGMNODE pNode, const char *pszName, PCFGMNODE
/*
* Allocate and init node.
*/
- PCFGMNODE pNew = (PCFGMNODE)MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
+ PCFGMNODE pNew = (PCFGMNODE)cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
if (pNew)
{
pNew->pParent = pNode;
@@ -1760,7 +1876,7 @@ static int cfgmR3InsertLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppL
/*
* Allocate and init node.
*/
- PCFGMLEAF pNew = (PCFGMLEAF)MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
+ PCFGMLEAF pNew = (PCFGMLEAF)cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM, sizeof(*pNew) + cchName);
if (pNew)
{
pNew->cchName = cchName;
@@ -1794,9 +1910,9 @@ static int cfgmR3InsertLeaf(PCFGMNODE pNode, const char *pszName, PCFGMLEAF *ppL
/**
- * Remove a node.
+ * Removes a node.
*
- * @param pNode Parent node.
+ * @param pNode The node to remove.
*/
VMMR3DECL(void) CFGMR3RemoveNode(PCFGMNODE pNode)
{
@@ -1823,20 +1939,17 @@ VMMR3DECL(void) CFGMR3RemoveNode(PCFGMNODE pNode)
{
if (pNode->pParent)
pNode->pParent->pFirstChild = pNode->pNext;
- else if (pNode == pNode->pVM->cfgm.s.pRoot) /* might be a different tree */
+ else if ( pNode->pVM /* might be a different tree */
+ && pNode == pNode->pVM->cfgm.s.pRoot)
pNode->pVM->cfgm.s.pRoot = NULL;
}
if (pNode->pNext)
pNode->pNext->pPrev = pNode->pPrev;
/*
- * Free ourselves. (bit of paranoia first)
+ * Free ourselves.
*/
- pNode->pVM = NULL;
- pNode->pNext = NULL;
- pNode->pPrev = NULL;
- pNode->pParent = NULL;
- MMR3HeapFree(pNode);
+ cfgmR3FreeNodeOnly(pNode);
}
}
@@ -1864,10 +1977,10 @@ static void cfgmR3RemoveLeaf(PCFGMNODE pNode, PCFGMLEAF pLeaf)
/*
* Free value and node.
*/
- cfgmR3FreeValue(pLeaf);
+ cfgmR3FreeValue(pNode->pVM, pLeaf);
pLeaf->pNext = NULL;
pLeaf->pPrev = NULL;
- MMR3HeapFree(pLeaf);
+ cfgmR3MemFree(pNode->pVM, pLeaf);
}
}
@@ -1878,22 +1991,23 @@ static void cfgmR3RemoveLeaf(PCFGMNODE pNode, PCFGMLEAF pLeaf)
* Use this before assigning a new value to a leaf.
* The caller must either free the leaf or assign a new value to it.
*
+ * @param pVM Used to select the heap.
* @param pLeaf Pointer to the leaf which value should be free.
*/
-static void cfgmR3FreeValue(PCFGMLEAF pLeaf)
+static void cfgmR3FreeValue(PVM pVM, PCFGMLEAF pLeaf)
{
if (pLeaf)
{
switch (pLeaf->enmType)
{
case CFGMVALUETYPE_BYTES:
- MMR3HeapFree(pLeaf->Value.Bytes.pau8);
+ cfgmR3MemFree(pVM, pLeaf->Value.Bytes.pau8);
pLeaf->Value.Bytes.pau8 = NULL;
pLeaf->Value.Bytes.cb = 0;
break;
case CFGMVALUETYPE_STRING:
- MMR3HeapFree(pLeaf->Value.String.psz);
+ cfgmR3StrFree(pVM, pLeaf->Value.String.psz);
pLeaf->Value.String.psz = NULL;
pLeaf->Value.String.cb = 0;
break;
@@ -1905,6 +2019,23 @@ static void cfgmR3FreeValue(PCFGMLEAF pLeaf)
}
}
+/**
+ * Destroys a tree created with CFGMR3CreateTree or CFGMR3DuplicateSubTree.
+ *
+ * @returns VBox status code.
+ * @param pRoot The root node of the tree.
+ */
+VMMR3DECL(int) CFGMR3DestroyTree(PCFGMNODE pRoot)
+{
+ if (!pRoot)
+ return VINF_SUCCESS;
+ AssertReturn(!pRoot->pParent, VERR_INVALID_PARAMETER);
+ AssertReturn(!pRoot->pVM || pRoot != pRoot->pVM->cfgm.s.pRoot, VERR_ACCESS_DENIED);
+
+ CFGMR3RemoveNode(pRoot);
+ return VINF_SUCCESS;
+}
+
/**
* Inserts a new integer value.
@@ -1948,7 +2079,7 @@ VMMR3DECL(int) CFGMR3InsertStringN(PCFGMNODE pNode, const char *pszName, const c
/*
* Allocate string object first.
*/
- char *pszStringCopy = (char *)MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cchString + 1);
+ char *pszStringCopy = (char *)cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cchString + 1);
if (pszStringCopy)
{
memcpy(pszStringCopy, pszString, cchString);
@@ -1966,7 +2097,7 @@ VMMR3DECL(int) CFGMR3InsertStringN(PCFGMNODE pNode, const char *pszName, const c
pLeaf->Value.String.cb = cchString + 1;
}
else
- MMR3HeapFree(pszStringCopy);
+ cfgmR3StrFree(pNode->pVM, pszStringCopy);
}
else
rc = VERR_NO_MEMORY;
@@ -2011,7 +2142,11 @@ VMMR3DECL(int) CFGMR3InsertStringFV(PCFGMNODE pNode, const char *pszName, const
/*
* Allocate string object first.
*/
- char *pszString = MMR3HeapAPrintfVU(pNode->pVM->pUVM, MM_TAG_CFGM_STRING, pszFormat, va);
+ char *pszString;
+ if (!pNode->pVM)
+ pszString = RTStrAPrintf2(pszFormat, va);
+ else
+ pszString = MMR3HeapAPrintfVU(pNode->pVM->pUVM, MM_TAG_CFGM_STRING, pszFormat, va);
if (pszString)
{
/*
@@ -2026,7 +2161,7 @@ VMMR3DECL(int) CFGMR3InsertStringFV(PCFGMNODE pNode, const char *pszName, const
pLeaf->Value.String.cb = strlen(pszString) + 1;
}
else
- MMR3HeapFree(pszString);
+ cfgmR3StrFree(pNode->pVM, pszString);
}
else
rc = VERR_NO_MEMORY;
@@ -2098,7 +2233,7 @@ VMMR3DECL(int) CFGMR3InsertBytes(PCFGMNODE pNode, const char *pszName, const voi
/*
* Allocate string object first.
*/
- void *pvCopy = MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cbBytes);
+ void *pvCopy = cfgmR3MemAlloc(pNode->pVM, MM_TAG_CFGM_STRING, cbBytes);
if (pvCopy || !cbBytes)
{
memcpy(pvCopy, pvBytes, cbBytes);
@@ -2114,6 +2249,8 @@ VMMR3DECL(int) CFGMR3InsertBytes(PCFGMNODE pNode, const char *pszName, const voi
pLeaf->Value.Bytes.cb = cbBytes;
pLeaf->Value.Bytes.pau8 = (uint8_t *)pvCopy;
}
+ else
+ cfgmR3MemFree(pNode->pVM, pvCopy);
}
else
rc = VERR_NO_MEMORY;
@@ -2892,7 +3029,8 @@ VMMR3DECL(int) CFGMR3QueryGCPtrSDef(PCFGMNODE pNode, const char *pszName, PRTGCI
* @param pNode Which node to search for pszName in.
* @param pszName Value name. This value must be of zero terminated character string type.
* @param ppszString Where to store the string pointer.
- * Free this using MMR3HeapFree().
+ * Free this using MMR3HeapFree() (or RTStrFree if not
+ * associated with a pUVM - see CFGMR3CreateTree).
*/
VMMR3DECL(int) CFGMR3QueryStringAlloc(PCFGMNODE pNode, const char *pszName, char **ppszString)
{
@@ -2900,14 +3038,14 @@ VMMR3DECL(int) CFGMR3QueryStringAlloc(PCFGMNODE pNode, const char *pszName, char
int rc = CFGMR3QuerySize(pNode, pszName, &cbString);
if (RT_SUCCESS(rc))
{
- char *pszString = (char *)MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbString);
+ char *pszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbString);
if (pszString)
{
rc = CFGMR3QueryString(pNode, pszName, pszString, cbString);
if (RT_SUCCESS(rc))
*ppszString = pszString;
else
- MMR3HeapFree(pszString);
+ cfgmR3StrFree(pNode->pVM, pszString);
}
else
rc = VERR_NO_MEMORY;
@@ -2927,7 +3065,8 @@ VMMR3DECL(int) CFGMR3QueryStringAlloc(PCFGMNODE pNode, const char *pszName, char
* MMR3HeapStrDup.
* @param pszName Value name. This value must be of zero terminated character string type.
* @param ppszString Where to store the string pointer. Not set on failure.
- * Free this using MMR3HeapFree().
+ * Free this using MMR3HeapFree() (or RTStrFree if not
+ * associated with a pUVM - see CFGMR3CreateTree).
* @param pszDef The default return value. This can be NULL.
*/
VMMR3DECL(int) CFGMR3QueryStringAllocDef(PCFGMNODE pNode, const char *pszName, char **ppszString, const char *pszDef)
@@ -2945,7 +3084,7 @@ VMMR3DECL(int) CFGMR3QueryStringAllocDef(PCFGMNODE pNode, const char *pszName, c
if (pLeaf->enmType == CFGMVALUETYPE_STRING)
{
size_t const cbSrc = pLeaf->Value.String.cb;
- char *pszString = (char *)MMR3HeapAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbSrc);
+ char *pszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbSrc);
if (pszString)
{
memcpy(pszString, pLeaf->Value.String.psz, cbSrc);
@@ -2962,7 +3101,11 @@ VMMR3DECL(int) CFGMR3QueryStringAllocDef(PCFGMNODE pNode, const char *pszName, c
if (!pszDef)
*ppszString = NULL;
else
- *ppszString = MMR3HeapStrDup(pNode->pVM, MM_TAG_CFGM_USER, pszDef);
+ {
+ size_t const cbDef = strlen(pszDef) + 1;
+ *ppszString = cfgmR3StrAlloc(pNode->pVM, MM_TAG_CFGM_USER, cbDef);
+ memcpy(*ppszString, pszDef, cbDef);
+ }
if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
rc = VINF_SUCCESS;
}
diff --git a/src/VBox/VMM/VMMR3/CPUM.cpp b/src/VBox/VMM/VMMR3/CPUM.cpp
index 61587384..a7315d3b 100644
--- a/src/VBox/VMM/VMMR3/CPUM.cpp
+++ b/src/VBox/VMM/VMMR3/CPUM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -39,11 +39,13 @@
#include <VBox/vmm/cpumdis.h>
#include <VBox/vmm/cpumctx-v1_6.h>
#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/patm.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
@@ -52,11 +54,12 @@
#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/log.h>
-#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
-#include <iprt/string.h>
-#include <iprt/mp.h>
+#include <iprt/assert.h>
#include <iprt/cpuset.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/string.h>
#include "internal/pgm.h"
@@ -112,7 +115,6 @@ typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
-static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
static int cpumR3CpuIdInit(PVM pVM);
static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
@@ -247,6 +249,7 @@ static const SSMFIELD g_aCpumCtxFields[] =
SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
+ /* msrApicBase is not included here, it resides in the APIC device state. */
SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
@@ -531,6 +534,42 @@ static const SSMFIELD g_aCpumCtxFieldsV16[] =
/**
+ * Checks for partial/leaky FXSAVE/FXRSTOR handling on AMD CPUs.
+ *
+ * AMD K7, K8 and newer AMD CPUs do not save/restore the x87 error
+ * pointers (last instruction pointer, last data pointer, last opcode)
+ * except when the ES bit (Exception Summary) in x87 FSW (FPU Status
+ * Word) is set. Thus if we don't clear these registers there is
+ * potential, local FPU leakage from a process using the FPU to
+ * another.
+ *
+ * See AMD Instruction Reference for FXSAVE, FXRSTOR.
+ *
+ * @param pVM Pointer to the VM.
+ */
+static void cpumR3CheckLeakyFpu(PVM pVM)
+{
+ uint32_t u32CpuVersion = ASMCpuId_EAX(1);
+ uint32_t const u32Family = u32CpuVersion >> 8;
+ if ( u32Family >= 6 /* K7 and higher */
+ && ASMIsAmdCpu())
+ {
+ uint32_t cExt = ASMCpuId_EAX(0x80000000);
+ if (ASMIsValidExtRange(cExt))
+ {
+ uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
+ if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+ {
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
+ Log(("CPUMR3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));
+ }
+ }
+ }
+}
+
+
+/**
* Initializes the CPUM.
*
* @returns VBox status code.
@@ -541,7 +580,7 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
LogFlow(("CPUMR3Init\n"));
/*
- * Assert alignment and sizes.
+ * Assert alignment, sizes and tables.
*/
AssertCompileMemberAlignment(VM, cpum.s, 32);
AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
@@ -552,11 +591,16 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
AssertCompileMemberAlignment(VM, aCpus, 64);
AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
+#ifdef VBOX_STRICT
+ int rc2 = cpumR3MsrStrictInitChecks();
+ AssertRCReturn(rc2, rc2);
+#endif
/* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
+
/* Calculate the offset from CPUMCPU to CPUM. */
for (VMCPUID i = 0; i < pVM->cCpus; i++)
{
@@ -607,13 +651,17 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
/*
- * Detect the host CPU vendor.
- * (The guest CPU vendor is re-detected later on.)
+ * Gather info about the host CPU.
*/
- uint32_t uEAX, uEBX, uECX, uEDX;
- ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
- pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
- pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;
+ PCPUMCPUIDLEAF paLeaves;
+ uint32_t cLeaves;
+ int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
+ RTMemFree(paLeaves);
+ AssertLogRelRCReturn(rc, rc);
+ pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
/*
* Setup hypervisor startup values.
@@ -622,10 +670,10 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
/*
* Register saved state data item.
*/
- int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
- NULL, cpumR3LiveExec, NULL,
- NULL, cpumR3SaveExec, NULL,
- cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
+ rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
+ NULL, cpumR3LiveExec, NULL,
+ NULL, cpumR3SaveExec, NULL,
+ cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
if (RT_FAILURE(rc))
return rc;
@@ -644,6 +692,11 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
return rc;
/*
+ * Check if we need to workaround partial/leaky FPU handling.
+ */
+ cpumR3CheckLeakyFpu(pVM);
+
+ /*
* Initialize the Guest CPUID state.
*/
rc = cpumR3CpuIdInit(pVM);
@@ -655,40 +708,178 @@ VMMR3DECL(int) CPUMR3Init(PVM pVM)
/**
- * Detect the CPU vendor give n the
+ * Loads MSR range overrides.
+ *
+ * This must be called before the MSR ranges are moved from the normal heap to
+ * the hyper heap!
*
- * @returns The vendor.
- * @param uEAX EAX from CPUID(0).
- * @param uEBX EBX from CPUID(0).
- * @param uECX ECX from CPUID(0).
- * @param uEDX EDX from CPUID(0).
+ * @returns VBox status code (VMSetError called).
+ * @param pVM Pointer to the cross context VM structure
+ * @param pMsrNode The CFGM node with the MSR overrides.
*/
-static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
+static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
{
- if ( uEAX >= 1
- && uEBX == X86_CPUID_VENDOR_AMD_EBX
- && uECX == X86_CPUID_VENDOR_AMD_ECX
- && uEDX == X86_CPUID_VENDOR_AMD_EDX)
- return CPUMCPUVENDOR_AMD;
-
- if ( uEAX >= 1
- && uEBX == X86_CPUID_VENDOR_INTEL_EBX
- && uECX == X86_CPUID_VENDOR_INTEL_ECX
- && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
- return CPUMCPUVENDOR_INTEL;
-
- if ( uEAX >= 1
- && uEBX == X86_CPUID_VENDOR_VIA_EBX
- && uECX == X86_CPUID_VENDOR_VIA_ECX
- && uEDX == X86_CPUID_VENDOR_VIA_EDX)
- return CPUMCPUVENDOR_VIA;
-
- /** @todo detect the other buggers... */
- return CPUMCPUVENDOR_UNKNOWN;
+ for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+ {
+ /*
+ * Assemble a valid MSR range.
+ */
+ CPUMMSRRANGE MsrRange;
+ MsrRange.offCpumCpu = 0;
+ MsrRange.fReserved = 0;
+
+ int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
+
+ rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ char szType[32];
+ rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
+ MsrRange.szName, rc);
+ if (!RTStrICmp(szType, "FixedValue"))
+ {
+ MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
+ MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
+
+ rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
+ MsrRange.szName, rc);
+
+ rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
+ MsrRange.szName, rc);
+ }
+ else
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+ "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
+
+ /*
+ * Insert the range into the table (replaces/splits/shrinks existing
+ * MSR ranges).
+ */
+ rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
+ }
+
+ return VINF_SUCCESS;
}
/**
+ * Loads CPUID leaf overrides.
+ *
+ * This must be called before the CPUID leaves are moved from the normal
+ * heap to the hyper heap!
+ *
+ * @returns VBox status code (VMSetError called).
+ * @param pVM Pointer to the cross context VM structure
+ * @param pParentNode The CFGM node with the CPUID leaves.
+ * @param pszLabel How to label the overrides we're loading.
+ */
+static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
+{
+ for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+ {
+ /*
+ * Get the leaf and subleaf numbers.
+ */
+ char szName[128];
+ int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
+
+ /* The leaf number is either specified directly or thru the node name. */
+ uint32_t uLeaf;
+ rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ {
+ rc = RTStrToUInt32Full(szName, 16, &uLeaf);
+ if (rc != VINF_SUCCESS)
+ return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
+ "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
+ }
+ else if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ uint32_t uSubLeaf;
+ rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ uint32_t fSubLeafMask;
+ rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ /*
+ * Look up the specified leaf, since the output register values
+ * defaults to any existing values. This allows overriding a single
+ * register, without needing to know the other values.
+ */
+ PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
+ uLeaf, uSubLeaf);
+ CPUMCPUIDLEAF Leaf;
+ if (pLeaf)
+ Leaf = *pLeaf;
+ else
+ RT_ZERO(Leaf);
+ Leaf.uLeaf = uLeaf;
+ Leaf.uSubLeaf = uSubLeaf;
+ Leaf.fSubLeafMask = fSubLeafMask;
+
+ rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
+ pszLabel, szName, rc);
+ rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
+ pszLabel, szName, rc);
+
+ /*
+ * Insert the leaf into the table (replaces existing ones).
+ */
+ rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf);
+ if (RT_FAILURE(rc))
+ return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+
+/**
* Fetches overrides for a CPUID leaf.
*
* @returns VBox status code.
@@ -767,7 +958,7 @@ static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t
{
/* Using the ECX variant for all of them can't hurt... */
for (uint32_t i = 0; i < cLeaves; i++)
- ASMCpuId_Idx_ECX(uStart + i, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);
+ ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);
/* Load CPUID leaf override; we currently don't care if the user
specifies features the host CPU doesn't support. */
@@ -775,6 +966,76 @@ static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t
}
+static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
+{
+ /*
+ * Install the CPUID information.
+ */
+ int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
+ MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
+
+ AssertLogRelRCReturn(rc, rc);
+
+ pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
+ pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
+ Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
+ Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
+
+ /*
+ * Explode the guest CPU features.
+ */
+ rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Adjust the scalable bus frequency according to the CPUID information
+ * we're now using.
+ */
+ if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
+ pCPUM->GuestInfo.uScalableBusFreq = pCPUM->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+ ? UINT64_C(100000000) /* 100MHz */
+ : UINT64_C(133333333); /* 133MHz */
+
+ /*
+ * Populate the legacy arrays. Currently used for everything, later only
+ * for patch manager.
+ */
+ struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
+ {
+ { pCPUM->aGuestCpuIdStd, RT_ELEMENTS(pCPUM->aGuestCpuIdStd), 0x00000000 },
+ { pCPUM->aGuestCpuIdExt, RT_ELEMENTS(pCPUM->aGuestCpuIdExt), 0x80000000 },
+ { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
+ { pCPUM->aGuestCpuIdHyper, RT_ELEMENTS(pCPUM->aGuestCpuIdHyper), 0x40000000 },
+ };
+ for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
+ {
+ uint32_t cLeft = aOldRanges[i].cCpuIds;
+ uint32_t uLeaf = aOldRanges[i].uBase + cLeft;
+ PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
+ while (cLeft-- > 0)
+ {
+ uLeaf--;
+ pLegacyLeaf--;
+
+ PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0);
+ if (pLeaf)
+ {
+ pLegacyLeaf->eax = pLeaf->uEax;
+ pLegacyLeaf->ebx = pLeaf->uEbx;
+ pLegacyLeaf->ecx = pLeaf->uEcx;
+ pLegacyLeaf->edx = pLeaf->uEdx;
+ }
+ else
+ *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
+ }
+ }
+
+ pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;
+
+ return VINF_SUCCESS;
+}
+
+
/**
* Initializes the emulated CPU's cpuid information.
*
@@ -785,20 +1046,19 @@ static int cpumR3CpuIdInit(PVM pVM)
{
PCPUM pCPUM = &pVM->cpum.s;
PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
- uint32_t i;
int rc;
-#define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
- if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
+#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
+ if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
{ \
- LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
- pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
+ LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
+ (a_pLeafReg) &= ~(uint32_t)(fMask); \
}
-#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
- if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
+#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
+ if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
{ \
- LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
- pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
+ LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
+ (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
}
/*
@@ -807,8 +1067,11 @@ static int cpumR3CpuIdInit(PVM pVM)
/** @cfgm{CPUM/SyntheticCpu, boolean, false}
* Enables the Synthetic CPU. The Vendor ID and Processor Name are
* completely overridden by VirtualBox custom strings. Some
- * CPUID information is withheld, like the cache info. */
- rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false);
+ * CPUID information is withheld, like the cache info.
+ *
+ * This is obsoleted by PortableCpuIdLevel. */
+ bool fSyntheticCpu;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false);
AssertRCReturn(rc, rc);
/** @cfgm{CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
@@ -816,55 +1079,127 @@ static int cpumR3CpuIdInit(PVM pVM)
* stripped. The higher the value the more features gets stripped. Higher
* values should only be used when older CPUs are involved since it may
* harm performance and maybe also cause problems with specific guests. */
- rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
- AssertRCReturn(rc, rc);
+ rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{CPUM/GuestCpuName, string}
+ * The name of of the CPU we're to emulate. The default is the host CPU.
+ * Note! CPUs other than "host" one is currently unsupported. */
+ char szCpuName[128];
+ rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
+ * Expose CMPXCHG16B to the guest if supported by the host.
+ */
+ bool fCmpXchg16b;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MONITOR, boolean, true}
+ * Expose MONITOR/MWAIT instructions to the guest.
+ */
+ bool fMonitor;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
+ * Expose MWAIT extended features to the guest. For now we expose just MWAIT
+ * break on interrupt feature (bit 1).
+ */
+ bool fMWaitExtensions;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/SSE4.1, boolean, false}
+ * Expose SSE4.1 to the guest if available.
+ */
+ bool fSse41;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.1", &fSse41, false);
+ AssertLogRelRCReturn(rc, rc);
- AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);
+ /** @cfgm{/CPUM/SSE4.2, boolean, false}
+ * Expose SSE4.2 to the guest if available.
+ */
+ bool fSse42;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.2", &fSse42, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
+ * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
+ * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
+ * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
+ */
+ bool fNt4LeafLimit;
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
+ * Restrict the reported CPU family+model+stepping of intel CPUs. This is
+ * probably going to be a temporary hack, so don't depend on this.
+ * The 1st byte of the value is the stepping, the 2nd byte value is the model
+ * number and the 3rd byte value is the family, and the 4th value must be zero.
+ */
+ uint32_t uMaxIntelFamilyModelStep;
+ rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
+ AssertLogRelRCReturn(rc, rc);
/*
- * Get the host CPUID leaves and redetect the guest CPU vendor (could've
- * been overridden).
+ * Get the guest CPU data from the database and/or the host.
*/
+ rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
+ if (RT_FAILURE(rc))
+ return rc == VERR_CPUM_DB_CPU_NOT_FOUND
+ ? VMSetError(pVM, rc, RT_SRC_POS,
+ "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
+ : rc;
+
+ /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
+ * Overrides the guest MSRs.
+ */
+ rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
+
/** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
- * Overrides the host CPUID leaf values used for calculating the guest CPUID
- * leaves. This can be used to preserve the CPUID values when moving a VM to a
- * different machine. Another use is restricting (or extending) the feature set
- * exposed to the guest. */
- PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
- rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg);
- AssertRCReturn(rc, rc);
- rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pHostOverrideCfg);
- AssertRCReturn(rc, rc);
- rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
- AssertRCReturn(rc, rc);
+ * Overrides the CPUID leaf values (from the host CPU usually) used for
+ * calculating the guest CPUID leaves. This can be used to preserve the CPUID
+ * values when moving a VM to a different machine. Another use is restricting
+ * (or extending) the feature set exposed to the guest. */
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
- pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
- pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);
+ if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
+ rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
+ "Found unsupported configuration node '/CPUM/CPUID/'. "
+ "Please use IMachine::setCPUIDLeaf() instead.");
/*
- * Determine the default leaf.
- *
- * Intel returns values of the highest standard function, while AMD
- * returns zeros. VIA on the other hand seems to returning nothing or
- * perhaps some random garbage, we don't try to duplicate this behavior.
+ * Pre-exploded the CPUID info.
*/
- ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
- &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
- &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
+ if (RT_SUCCESS(rc))
+ rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
+ pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
+ RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
+ pCPUM->GuestInfo.paMsrRangesR3 = NULL;
+ return rc;
+ }
- /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
- * Expose CMPXCHG16B to the guest if supported by the host.
- */
- bool fCmpXchg16b;
- rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc);
- /* Cpuid 1 & 0x80000001:
+ /* ... split this function about here ... */
+
+
+ /* Cpuid 1:
* Only report features we can support.
*
* Note! When enabling new features the Synthetic CPU and Portable CPUID
* options may require adjusting (i.e. stripping what was enabled).
*/
- pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
+ PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+ 1, 0); /* Note! Must refetch when used later. */
+ AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
+ pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
| X86_CPUID_FEATURE_EDX_VME
| X86_CPUID_FEATURE_EDX_DE
| X86_CPUID_FEATURE_EDX_PSE
@@ -895,10 +1230,10 @@ static int cpumR3CpuIdInit(PVM pVM)
//| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
//| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
| 0;
- pCPUM->aGuestCpuIdStd[1].ecx &= 0
+ pStdFeatureLeaf->uEcx &= 0
| X86_CPUID_FEATURE_ECX_SSE3
/* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
- | ((pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
+ | ((fMonitor && pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
//| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
//| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
//| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
@@ -908,6 +1243,8 @@ static int cpumR3CpuIdInit(PVM pVM)
| (fCmpXchg16b ? X86_CPUID_FEATURE_ECX_CX16 : 0)
/* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
//| X86_CPUID_FEATURE_ECX_TPRUPDATE
+ | (fSse41 ? X86_CPUID_FEATURE_ECX_SSE4_1 : 0)
+ | (fSse42 ? X86_CPUID_FEATURE_ECX_SSE4_2 : 0)
/* ECX Bit 21 - x2APIC support - not yet. */
// | X86_CPUID_FEATURE_ECX_X2APIC
/* ECX Bit 23 - POPCNT instruction. */
@@ -915,16 +1252,18 @@ static int cpumR3CpuIdInit(PVM pVM)
| 0;
if (pCPUM->u8PortableCpuIdLevel > 0)
{
- PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
- PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
- PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
- PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16, X86_CPUID_FEATURE_ECX_CX16);
- PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
- PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE, X86_CPUID_FEATURE_EDX_SSE);
- PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
- PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
-
- Assert(!(pCPUM->aGuestCpuIdStd[1].edx & ( X86_CPUID_FEATURE_EDX_SEP
+ PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
+ PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16);
+ PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
+ PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE);
+ PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
+ PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
+
+ Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP
| X86_CPUID_FEATURE_EDX_PSN
| X86_CPUID_FEATURE_EDX_DS
| X86_CPUID_FEATURE_EDX_ACPI
@@ -932,7 +1271,7 @@ static int cpumR3CpuIdInit(PVM pVM)
| X86_CPUID_FEATURE_EDX_TM
| X86_CPUID_FEATURE_EDX_PBE
)));
- Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & ( X86_CPUID_FEATURE_ECX_PCLMUL
+ Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL
| X86_CPUID_FEATURE_ECX_DTES64
| X86_CPUID_FEATURE_ECX_CPLDS
| X86_CPUID_FEATURE_ECX_VMX
@@ -962,7 +1301,11 @@ static int cpumR3CpuIdInit(PVM pVM)
*
* ASSUMES that this is ALWAYS the AMD defined feature set if present.
*/
- pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
+ PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x80000001), 0); /* Note! Must refetch when used later. */
+ if (pExtFeatureLeaf)
+ {
+ pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
| X86_CPUID_AMD_FEATURE_EDX_VME
| X86_CPUID_AMD_FEATURE_EDX_DE
| X86_CPUID_AMD_FEATURE_EDX_PSE
@@ -991,7 +1334,7 @@ static int cpumR3CpuIdInit(PVM pVM)
| X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
| X86_CPUID_AMD_FEATURE_EDX_3DNOW
| 0;
- pCPUM->aGuestCpuIdExt[1].ecx &= 0
+ pExtFeatureLeaf->uEcx &= 0
//| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
//| X86_CPUID_AMD_FEATURE_ECX_CMPL
//| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
@@ -1008,113 +1351,54 @@ static int cpumR3CpuIdInit(PVM pVM)
//| X86_CPUID_AMD_FEATURE_ECX_SKINIT
//| X86_CPUID_AMD_FEATURE_ECX_WDT
| 0;
- if (pCPUM->u8PortableCpuIdLevel > 0)
- {
- PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
- PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
- PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
- PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
- PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
- PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
- PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
-
- Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
- | X86_CPUID_AMD_FEATURE_ECX_SVM
- | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
- | X86_CPUID_AMD_FEATURE_ECX_CR8L
- | X86_CPUID_AMD_FEATURE_ECX_ABM
- | X86_CPUID_AMD_FEATURE_ECX_SSE4A
- | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
- | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
- | X86_CPUID_AMD_FEATURE_ECX_OSVW
- | X86_CPUID_AMD_FEATURE_ECX_IBS
- | X86_CPUID_AMD_FEATURE_ECX_SSE5
- | X86_CPUID_AMD_FEATURE_ECX_SKINIT
- | X86_CPUID_AMD_FEATURE_ECX_WDT
- | UINT32_C(0xffffc000)
- )));
- Assert(!(pCPUM->aGuestCpuIdExt[1].edx & ( RT_BIT(10)
- | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
- | RT_BIT(18)
- | RT_BIT(19)
- | RT_BIT(21)
- | X86_CPUID_AMD_FEATURE_EDX_AXMMX
- | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
- | RT_BIT(28)
- )));
- }
-
- /*
- * Apply the Synthetic CPU modifications. (TODO: move this up)
- */
- if (pCPUM->fSyntheticCpu)
- {
- static const char s_szVendor[13] = "VirtualBox ";
- static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000 "; /* includes null terminator */
-
- pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;
-
- /* Limit the nr of standard leaves; 5 for monitor/mwait */
- pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);
-
- /* 0: Vendor */
- pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
- pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
- pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];
-
- /* 1.eax: Version information. family : model : stepping */
- pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;
-
- /* Leaves 2 - 4 are Intel only - zero them out */
- memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
- memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
- memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));
-
- /* Leaf 5 = monitor/mwait */
-
- /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
- pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
- /* AMD only - set to zero. */
- pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
-
- /* 0x800000001: shared feature bits are set dynamically. */
- memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
-
- /* 0x800000002-4: Processor Name String Identifier. */
- pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
- pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
- pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
- pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
- pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
- pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
- pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
- pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
- pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
- pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
- pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
- pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];
-
- /* 0x800000005-7 - reserved -> zero */
- memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
- memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
- memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));
-
- /* 0x800000008: only the max virtual and physical address size. */
- pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
+ if (pCPUM->u8PortableCpuIdLevel > 0)
+ {
+ PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+ PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+ PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+ PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
+
+ Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
+ | X86_CPUID_AMD_FEATURE_ECX_SVM
+ | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
+ | X86_CPUID_AMD_FEATURE_ECX_CR8L
+ | X86_CPUID_AMD_FEATURE_ECX_ABM
+ | X86_CPUID_AMD_FEATURE_ECX_SSE4A
+ | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
+ | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
+ | X86_CPUID_AMD_FEATURE_ECX_OSVW
+ | X86_CPUID_AMD_FEATURE_ECX_IBS
+ | X86_CPUID_AMD_FEATURE_ECX_SSE5
+ | X86_CPUID_AMD_FEATURE_ECX_SKINIT
+ | X86_CPUID_AMD_FEATURE_ECX_WDT
+ | UINT32_C(0xffffc000)
+ )));
+ Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
+ | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
+ | RT_BIT(18)
+ | RT_BIT(19)
+ | RT_BIT(21)
+ | X86_CPUID_AMD_FEATURE_EDX_AXMMX
+ | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
+ | RT_BIT(28)
+ )));
+ }
}
/*
* Hide HTT, multicode, SMP, whatever.
* (APIC-ID := 0 and #LogCpus := 0)
*/
- pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
+ pStdFeatureLeaf->uEbx &= 0x0000ffff;
#ifdef VBOX_WITH_MULTI_CORE
- if ( pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
- && pVM->cCpus > 1)
+ if (pVM->cCpus > 1)
{
/* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
- pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
- pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
+ pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
+ pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
}
#endif
@@ -1124,12 +1408,13 @@ static int cpumR3CpuIdInit(PVM pVM)
* VIA: Reserved
* Safe to expose; restrict the number of calls to 1 for the portable case.
*/
- if ( pCPUM->u8PortableCpuIdLevel > 0
- && pCPUM->aGuestCpuIdStd[0].eax >= 2
- && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
+ PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
+ if ( pCPUM->u8PortableCpuIdLevel > 0
+ && pCurLeaf
+ && (pCurLeaf->uEax & 0xff) > 1)
{
- LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
- pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
+ LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
+ pCurLeaf->uEax &= UINT32_C(0xfffffffe);
}
/* Cpuid 3:
@@ -1139,11 +1424,14 @@ static int cpumR3CpuIdInit(PVM pVM)
* VIA: Reserved
* Safe to expose
*/
- if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
+ pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
+ if ( !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
+ && pCurLeaf)
{
- pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
if (pCPUM->u8PortableCpuIdLevel > 0)
- pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
+ pCurLeaf->uEax = pCurLeaf->uEbx = 0;
}
/* Cpuid 4:
@@ -1156,18 +1444,31 @@ static int cpumR3CpuIdInit(PVM pVM)
* Bits 31-26: Maximum number of processor cores in this physical package**
* Note: These SMP values are constant regardless of ECX
*/
- pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
- pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
-#ifdef VBOX_WITH_MULTI_CORE
- if ( pVM->cCpus > 1
- && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
+ CPUMCPUIDLEAF NewLeaf;
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
+ if (pCurLeaf)
{
- AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
- /* One logical processor with possibly multiple cores. */
- /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
- pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
- }
+ NewLeaf.uLeaf = 4;
+ NewLeaf.uSubLeaf = 0;
+ NewLeaf.fSubLeafMask = 0;
+ NewLeaf.uEax = 0;
+ NewLeaf.uEbx = 0;
+ NewLeaf.uEcx = 0;
+ NewLeaf.uEdx = 0;
+ NewLeaf.fFlags = 0;
+#ifdef VBOX_WITH_MULTI_CORE
+ if ( pVM->cCpus > 1
+ && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ {
+ AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
+ /* One logical processor with possibly multiple cores. */
+ /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
+ NewLeaf.uEax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
+ }
#endif
+ rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+ AssertLogRelRCReturn(rc, rc);
+ }
/* Cpuid 5: Monitor/mwait Leaf
* Intel: ECX, EDX - reserved
@@ -1178,34 +1479,33 @@ static int cpumR3CpuIdInit(PVM pVM)
* VIA: Reserved
* Safe to expose
*/
- if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
- pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
-
- pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
- /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
- * Expose MWAIT extended features to the guest. For now we expose
- * just MWAIT break on interrupt feature (bit 1).
- */
- bool fMWaitExtensions;
- rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
- if (fMWaitExtensions)
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 5, 0);
+ if (pCurLeaf)
{
- pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
- /* @todo: for now we just expose host's MWAIT C-states, although conceptually
- it shall be part of our power management virtualization model */
+ pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
+ if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
+ pCurLeaf->uEax = pCurLeaf->uEbx = 0;
+
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+ if (fMWaitExtensions)
+ {
+ pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
+ /** @todo: for now we just expose host's MWAIT C-states, although conceptually
+ it shall be part of our power management virtualization model */
#if 0
- /* MWAIT sub C-states */
- pCPUM->aGuestCpuIdStd[5].edx =
- (0 << 0) /* 0 in C0 */ |
- (2 << 4) /* 2 in C1 */ |
- (2 << 8) /* 2 in C2 */ |
- (2 << 12) /* 2 in C3 */ |
- (0 << 16) /* 0 in C4 */
- ;
+ /* MWAIT sub C-states */
+ pCurLeaf->uEdx =
+ (0 << 0) /* 0 in C0 */ |
+ (2 << 4) /* 2 in C1 */ |
+ (2 << 8) /* 2 in C2 */ |
+ (2 << 12) /* 2 in C3 */ |
+ (0 << 16) /* 0 in C4 */
+ ;
#endif
+ }
+ else
+ pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
}
- else
- pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
/* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
* Safe to pass on to the guest.
@@ -1224,16 +1524,17 @@ static int cpumR3CpuIdInit(PVM pVM)
* EDX: Advanced Power Management Information
* VIA: Reserved
*/
- if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
+ if (pCurLeaf)
{
- Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);
+ Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
- pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
+ pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
- if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
+ if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
{
/* Only expose the TSC invariant capability bit to the guest. */
- pCPUM->aGuestCpuIdExt[7].edx &= 0
+ pCurLeaf->uEdx &= 0
//| X86_CPUID_AMD_ADVPOWER_EDX_TS
//| X86_CPUID_AMD_ADVPOWER_EDX_FID
//| X86_CPUID_AMD_ADVPOWER_EDX_VID
@@ -1254,7 +1555,7 @@ static int cpumR3CpuIdInit(PVM pVM)
| 0;
}
else
- pCPUM->aGuestCpuIdExt[7].edx = 0;
+ pCurLeaf->uEdx = 0;
}
/* Cpuid 0x800000008:
@@ -1266,55 +1567,62 @@ static int cpumR3CpuIdInit(PVM pVM)
* VIA: EAX: Virtual/Physical address Size
* EBX, ECX, EDX - reserved
*/
- if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
+ if (pCurLeaf)
{
/* Only expose the virtual and physical address sizes to the guest. */
- pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
- pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
+ pCurLeaf->uEax &= UINT32_C(0x0000ffff);
+ pCurLeaf->uEbx = pCurLeaf->uEdx = 0; /* reserved */
/* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
* NC (0-7) Number of cores; 0 equals 1 core */
- pCPUM->aGuestCpuIdExt[8].ecx = 0;
+ pCurLeaf->uEcx = 0;
#ifdef VBOX_WITH_MULTI_CORE
if ( pVM->cCpus > 1
- && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
+ && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
{
/* Legacy method to determine the number of cores. */
- pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
- pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
+ pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
+ pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0x80000001), 0);
+ if (pExtFeatureLeaf)
+ pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
}
#endif
}
- /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
- * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
- * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
- * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
- */
- bool fNt4LeafLimit;
- rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
- if (fNt4LeafLimit)
- pCPUM->aGuestCpuIdStd[0].eax = 3; /** @todo r=bird: shouldn't we check if pCPUM->aGuestCpuIdStd[0].eax > 3 before setting it 3 here? */
/*
- * Limit it the number of entries and fill the remaining with the defaults.
+ * Limit it the number of entries, zapping the remainder.
*
* The limits are masking off stuff about power saving and similar, this
* is perhaps a bit crudely done as there is probably some relatively harmless
* info too in these leaves (like words about having a constant TSC).
*/
- if (pCPUM->aGuestCpuIdStd[0].eax > 5)
- pCPUM->aGuestCpuIdStd[0].eax = 5;
- for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
- pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
-
- if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
- pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
- for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
- ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
- : 0;
- i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
- i++)
- pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
+ if (pCurLeaf)
+ {
+ if (pCurLeaf->uEax > 5)
+ {
+ pCurLeaf->uEax = 5;
+ cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+ pCurLeaf->uEax + 1, UINT32_C(0x000fffff));
+ }
+
+ /* NT4 hack, no zapping of extra leaves here. */
+ if (fNt4LeafLimit && pCurLeaf->uEax > 3)
+ pCurLeaf->uEax = 3;
+ }
+
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
+ if (pCurLeaf)
+ {
+ if (pCurLeaf->uEax > UINT32_C(0x80000008))
+ {
+ pCurLeaf->uEax = UINT32_C(0x80000008);
+ cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+ pCurLeaf->uEax + 1, UINT32_C(0x800fffff));
+ }
+ }
/*
* Centaur stuff (VIA).
@@ -1324,19 +1632,25 @@ static int cpumR3CpuIdInit(PVM pVM)
* let on about any of those... 0xc0000002 seems to be some
* temperature/hz/++ stuff, include it as well (static).
*/
- if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
- && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
+ if (pCurLeaf)
{
- pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
- pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
- for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
- i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
- i++)
- pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
+ if ( pCurLeaf->uEax >= UINT32_C(0xc0000000)
+ && pCurLeaf->uEax <= UINT32_C(0xc0000004))
+ {
+ pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
+ cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0xc0000002), UINT32_C(0xc00fffff));
+
+ pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0xc0000001), 0);
+ if (pCurLeaf)
+ pCurLeaf->uEdx = 0; /* all features hidden */
+ }
+ else
+ cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+ UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
}
- else
- for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
- pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
/*
* Hypervisor identification.
@@ -1345,64 +1659,105 @@ static int cpumR3CpuIdInit(PVM pVM)
* 0x40000000 function returns 0x40000001 and identifying ourselves.
* Currently we do not support any hypervisor-specific interface.
*/
- pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
- pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
- = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256; /* 'VBox' */
- pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e; /* 'none' */
- pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
- = pCPUM->aGuestCpuIdHyper[1].edx = 0; /* Reserved */
+ NewLeaf.uLeaf = UINT32_C(0x40000000);
+ NewLeaf.uSubLeaf = 0;
+ NewLeaf.fSubLeafMask = 0;
+ NewLeaf.uEax = UINT32_C(0x40000001);
+ NewLeaf.uEbx = 0x786f4256 /* 'VBox' */;
+ NewLeaf.uEcx = 0x786f4256 /* 'VBox' */;
+ NewLeaf.uEdx = 0x786f4256 /* 'VBox' */;
+ NewLeaf.fFlags = 0;
+ rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+ AssertLogRelRCReturn(rc, rc);
+
+ NewLeaf.uLeaf = UINT32_C(0x40000001);
+ NewLeaf.uEax = 0x656e6f6e; /* 'none' */
+ NewLeaf.uEbx = 0;
+ NewLeaf.uEcx = 0;
+ NewLeaf.uEdx = 0;
+ NewLeaf.fFlags = 0;
+ rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+ AssertLogRelRCReturn(rc, rc);
/*
- * Load CPUID overrides from configuration.
- * Note: Kind of redundant now, but allows unchanged overrides
+ * Mini CPU selection support for making Mac OS X happy.
*/
- /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
- * Overrides the CPUID leaf values. */
- PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
- rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pOverrideCfg);
- AssertRCReturn(rc, rc);
- rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pOverrideCfg);
- AssertRCReturn(rc, rc);
- rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
- AssertRCReturn(rc, rc);
+ if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+ {
+ pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
+ uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
+ ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
+ ASMGetCpuFamily(pStdFeatureLeaf->uEax),
+ 0);
+ if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
+ {
+ uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
+ uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
+ uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
+ uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */
+ uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */
+ if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */
+ uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
+ LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
+ pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
+ pStdFeatureLeaf->uEax = uNew;
+ }
+ }
/*
- * Check if PAE was explicitely enabled by the user.
+ * MSR fudging.
*/
+ /** @cfgm{CPUM/FudgeMSRs, boolean, true}
+ * Fudges some common MSRs if not present in the selected CPU database entry.
+ * This is for trying to keep VMs running when moved between different hosts
+ * and different CPU vendors. */
bool fEnable;
- rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
+ rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRCReturn(rc, rc);
if (fEnable)
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+ {
+ rc = cpumR3MsrApplyFudge(pVM);
+ AssertLogRelRCReturn(rc, rc);
+ }
/*
- * We don't normally enable NX for raw-mode, so give the user a chance to
- * force it on.
+ * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
+ * guest CPU features again.
*/
+ void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
+ int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
+ RTMemFree(pvFree);
+
+ pvFree = pCPUM->GuestInfo.paMsrRangesR3;
+ int rc2 = MMHyperDupMem(pVM, pvFree,
+ sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
+ MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
+ RTMemFree(pvFree);
+ AssertLogRelRCReturn(rc1, rc1);
+ AssertLogRelRCReturn(rc2, rc2);
+
+ pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
+ pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
+ cpumR3MsrRegStats(pVM);
+
+ /*
+ * Some more configuration that we're applying at the end of everything
+ * via the CPUMSetGuestCpuIdFeature API.
+ */
+
+ /* Check if PAE was explicitely enabled by the user. */
+ rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
+ if (fEnable)
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+
+ /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc);
if (fEnable)
CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
- /*
- * We don't enable the Hypervisor Present bit by default, but it may
- * be needed by some guests.
- */
+ /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc);
if (fEnable)
CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);
- /*
- * Log the cpuid and we're good.
- */
- bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
- RTCPUSET OnlineSet;
- LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
- (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
- RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
- LogRel(("************************* CPUID dump ************************\n"));
- DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
- LogRel(("\n"));
- DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
- RTLogRelSetBuffering(fOldBuffered);
- LogRel(("******************** End of CPUID dump **********************\n"));
#undef PORTABLE_DISABLE_FEATURE_BIT
#undef PORTABLE_CLEAR_BITS_WHEN
@@ -1423,7 +1778,13 @@ static int cpumR3CpuIdInit(PVM pVM)
VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
{
LogFlow(("CPUMR3Relocate\n"));
- /* nothing to do any more. */
+
+ pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
+ pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
+
+ /* Recheck the guest DRx values in raw-mode. */
+ for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ CPUMRecalcHyperDRx(&pVM->aCpus[iCpu], UINT8_MAX, false);
}
@@ -1484,12 +1845,15 @@ VMMR3DECL(int) CPUMR3Term(PVM pVM)
*
* Used by CPUMR3Reset and CPU hot plugging.
*
- * @param pVCpu Pointer to the VMCPU.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param pVCpu Pointer to the cross context virtual CPU structure of
+ * the CPU that is being reset. This may differ from the
+ * current EMT.
*/
-VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
+VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
{
/** @todo anything different for VCPU > 0? */
- PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
/*
* Initialize everything to ZERO first.
@@ -1567,13 +1931,50 @@ VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
supports all bits, since a zero value here should be read as 0xffbf. */
+ /*
+ * MSRs.
+ */
/* Init PAT MSR */
pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
- /* Reset EFER; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State
- * The Intel docs don't mention it.
- */
- pCtx->msrEFER = 0;
+ /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
+ * The Intel docs don't mention it. */
+ Assert(!pCtx->msrEFER);
+
+ /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
+ is supposed to be here, just trying provide useful/sensible values. */
+ PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
+ if (pRange)
+ {
+ pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+ | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
+ | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
+ | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
+ pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+ | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+ pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
+ }
+
+ /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
+
+ /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
+ * called from each EMT while we're getting called by CPUMR3Reset()
+ * iteratively on the same thread. Fix later. */
+#if 0 /** @todo r=bird: This we will do in TM, not here. */
+ /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
+ CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
+#endif
+
+
+ /* C-state control. Guesses. */
+ pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
+
+
+ /*
+ * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
+ * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
+ */
+ PDMApicGetBase(pVCpu, &pCtx->msrApicBase);
}
@@ -1587,10 +1988,10 @@ VMMR3DECL(void) CPUMR3Reset(PVM pVM)
{
for (VMCPUID i = 0; i < pVM->cCpus; i++)
{
- CPUMR3ResetCpu(&pVM->aCpus[i]);
+ CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(&pVM->aCpus[i]);
+ PCPUMCTX pCtx = &pVM->aCpus[i].cpum.s.Guest;
/* Magic marker for searching in crash dumps. */
strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
@@ -1630,18 +2031,73 @@ static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
*/
CPUMCPUID aRawStd[16];
for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
- ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
+ ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
CPUMCPUID aRawExt[32];
for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
- ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
}
+static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+ uint32_t cCpuIds;
+ int rc = SSMR3GetU32(pSSM, &cCpuIds);
+ if (RT_SUCCESS(rc))
+ {
+ if (cCpuIds < 64)
+ {
+ for (uint32_t i = 0; i < cCpuIds; i++)
+ {
+ CPUMCPUID CpuId;
+ rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
+ if (RT_FAILURE(rc))
+ break;
+
+ CPUMCPUIDLEAF NewLeaf;
+ NewLeaf.uLeaf = uBase + i;
+ NewLeaf.uSubLeaf = 0;
+ NewLeaf.fSubLeafMask = 0;
+ NewLeaf.uEax = CpuId.eax;
+ NewLeaf.uEbx = CpuId.ebx;
+ NewLeaf.uEcx = CpuId.ecx;
+ NewLeaf.uEdx = CpuId.edx;
+ NewLeaf.fFlags = 0;
+ rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf);
+ }
+ }
+ else
+ rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+ }
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(*ppaLeaves);
+ *ppaLeaves = NULL;
+ *pcLeaves = 0;
+ }
+ return rc;
+}
+
+
+static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+ *ppaLeaves = NULL;
+ *pcLeaves = 0;
+
+ int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
+ if (RT_SUCCESS(rc))
+ rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
+
+ return rc;
+}
+
+
/**
* Loads the CPU ID leaves saved by pass 0.
*
@@ -1723,7 +2179,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
if ( (aGuestCpuId##set [1].reg & bit) \
&& !(aHostRaw##set [1].reg & bit) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
{ \
if (fStrictCpuIdChecks) \
@@ -1737,7 +2192,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
if ( (aGuestCpuId##set [1].reg & bit) \
&& !(aHostRaw##set [1].reg & bit) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
} while (0)
@@ -1746,7 +2200,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
if ( (aGuestCpuId##set [1].reg & bit) \
&& !(aHostRaw##set [1].reg & bit) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
} while (0)
@@ -1759,7 +2212,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
&& fGuestAmd \
&& (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
{ \
if (fStrictCpuIdChecks) \
@@ -1774,7 +2226,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
&& fGuestAmd \
&& (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
} while (0)
@@ -1784,7 +2235,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
&& fGuestAmd \
&& (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
&& !(aHostOverride##set [1].reg & bit) \
- && !(aGuestOverride##set [1].reg & bit) \
) \
LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
} while (0)
@@ -1799,7 +2249,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
? aHostRawExt[1].reg & (ExtBit) \
: aHostRawStd[1].reg & (StdBit)) \
&& !(aHostOverrideExt[1].reg & (ExtBit)) \
- && !(aGuestOverrideExt[1].reg & (ExtBit)) \
) \
{ \
if (fStrictCpuIdChecks) \
@@ -1815,7 +2264,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
? aHostRawExt[1].reg & (ExtBit) \
: aHostRawStd[1].reg & (StdBit)) \
&& !(aHostOverrideExt[1].reg & (ExtBit)) \
- && !(aGuestOverrideExt[1].reg & (ExtBit)) \
) \
LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
} while (0)
@@ -1826,7 +2274,6 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
? aHostRawExt[1].reg & (ExtBit) \
: aHostRawStd[1].reg & (StdBit)) \
&& !(aHostOverrideExt[1].reg & (ExtBit)) \
- && !(aGuestOverrideExt[1].reg & (ExtBit)) \
) \
LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
} while (0)
@@ -1835,26 +2282,12 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
/*
* Load them into stack buffers first.
*/
- CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
- uint32_t cGuestCpuIdStd;
- int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
- if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
- return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
- SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
-
- CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
- uint32_t cGuestCpuIdExt;
- rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
- if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
- return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
- SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
+ PCPUMCPUIDLEAF paLeaves;
+ uint32_t cLeaves;
+ int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
+ AssertRCReturn(rc, rc);
- CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
- uint32_t cGuestCpuIdCentaur;
- rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
- if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
- return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
- SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
+ /** @todo we'll be leaking paLeaves on error return... */
CPUMCPUID GuestCpuIdDef;
rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
@@ -1865,7 +2298,10 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
if (cRawStd > RT_ELEMENTS(aRawStd))
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
- SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
+ rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
+ AssertRCReturn(rc, rc);
+ for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
+ ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
CPUMCPUID aRawExt[32];
uint32_t cRawExt;
@@ -1874,54 +2310,27 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
AssertRCReturn(rc, rc);
-
- /*
- * Note that we support restoring less than the current amount of standard
- * leaves because we've been allowed more is newer version of VBox.
- *
- * So, pad new entries with the default.
- */
- for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
- aGuestCpuIdStd[i] = GuestCpuIdDef;
-
- for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
- aGuestCpuIdExt[i] = GuestCpuIdDef;
-
- for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
- aGuestCpuIdCentaur[i] = GuestCpuIdDef;
-
- for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
- ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
-
for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
- ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
/*
* Get the raw CPU IDs for the current host.
*/
CPUMCPUID aHostRawStd[16];
for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
- ASMCpuId(i, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
+ ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
CPUMCPUID aHostRawExt[32];
for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
- ASMCpuId(i | UINT32_C(0x80000000), &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
+ ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
+ &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
/*
* Get the host and guest overrides so we don't reject the state because
* some feature was enabled thru these interfaces.
* Note! We currently only need the feature leaves, so skip rest.
*/
- PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
- CPUMCPUID aGuestOverrideStd[2];
- memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
- cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
-
- CPUMCPUID aGuestOverrideExt[2];
- memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
- cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
-
- pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
+ PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
CPUMCPUID aHostOverrideStd[2];
memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
@@ -1942,7 +2351,7 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
* For raw-mode we'll require that the CPUs are very similar since we don't
* intercept CPUID instructions for user mode applications.
*/
- if (!HWACCMIsEnabled(pVM))
+ if (!HMIsEnabled(pVM))
{
/* CPUID(0) */
CPUID_CHECK_RET( aHostRawStd[0].ebx == aRawStd[0].ebx
@@ -2172,6 +2581,10 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
* "EMU" - Possible to emulate, could be lots of work and very slow.
* "EMU?" - Can this be emulated?
*/
+ CPUMCPUID aGuestCpuIdStd[2];
+ RT_ZERO(aGuestCpuIdStd);
+ cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
+
/* CPUID(1).ecx */
CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
@@ -2241,8 +2654,9 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
/* CPUID(0x80000000). */
- if ( aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
- && aGuestCpuIdExt[0].eax < UINT32_C(0x8000007f))
+ CPUMCPUID aGuestCpuIdExt[2];
+ RT_ZERO(aGuestCpuIdExt);
+ if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
{
/** @todo deal with no 0x80000001 on the host. */
bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
@@ -2320,10 +2734,14 @@ static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
/*
* We're good, commit the CPU ID leaves.
*/
- memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
- memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
- memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
- pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
+ MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
+ pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
+ pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
+ pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
+ rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
+ RTMemFree(paLeaves);
+ AssertLogRelRCReturn(rc, rc);
+
#undef CPUID_CHECK_RET
#undef CPUID_CHECK_WRN
@@ -2500,6 +2918,13 @@ static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVers
SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
}
+
+ /* REM and other may have cleared must-be-one fields in DR6 and
+ DR7, fix these. */
+ pVCpu->cpum.s.Guest.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
+ pVCpu->cpum.s.Guest.dr[6] |= X86_DR6_RA1_MASK;
+ pVCpu->cpum.s.Guest.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
+ pVCpu->cpum.s.Guest.dr[7] |= X86_DR7_RA1_MASK;
}
/* Older states does not have the internal selector register flags
@@ -2509,7 +2934,7 @@ static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVers
for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
{
PVMCPU pVCpu = &pVM->aCpus[iCpu];
- bool const fValid = HWACCMIsEnabled(pVM)
+ bool const fValid = HMIsEnabled(pVM)
|| ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
&& !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
@@ -2604,8 +3029,8 @@ static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVers
*/
/** @todo we should check the 64 bits capabilities too! */
uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
- ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
- ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
+ ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
+ ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
uint32_t au32CpuIdSaved[8];
rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
if (RT_SUCCESS(rc))
@@ -2701,9 +3126,14 @@ static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
return VERR_INTERNAL_ERROR_2;
}
- /* Notify PGM of the NXE states in case they've changed. */
for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ {
+ /* Notify PGM of the NXE states in case they've changed. */
PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
+
+ /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
+ PDMApicGetBase(&pVM->aCpus[iCpu], &pVM->aCpus[iCpu].cpum.s.Guest.msrApicBase);
+ }
return VINF_SUCCESS;
}
@@ -3021,17 +3451,17 @@ static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, cons
}
else
{
- if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
+ if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
{
pszArgs += 7;
*penmType = CPUMDUMPTYPE_VERBOSE;
}
- else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
+ else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
{
pszArgs += 5;
*penmType = CPUMDUMPTYPE_TERSE;
}
- else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
+ else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
{
pszArgs += 7;
*penmType = CPUMDUMPTYPE_DEFAULT;
@@ -3063,7 +3493,7 @@ static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const cha
pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
- PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
}
@@ -3085,9 +3515,9 @@ static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, cons
pVCpu = &pVM->aCpus[0];
char szInstruction[256];
- int rc = DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
- if (RT_SUCCESS(rc))
- pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
+ szInstruction[0] = '\0';
+ DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
+ pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
}
@@ -3282,20 +3712,37 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
CPUMCPUID Guest;
unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
+ uint32_t cStdHstMax;
+ uint32_t dummy;
+ ASMCpuIdExSlow(0, 0, 0, 0, &cStdHstMax, &dummy, &dummy, &dummy);
+
+ unsigned cStdLstMax = RT_MAX(RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd), cStdHstMax);
+
pHlp->pfnPrintf(pHlp,
" RAW Standard CPUIDs\n"
" Function eax ebx ecx edx\n");
- for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
+ for (unsigned i = 0; i <= cStdLstMax ; i++)
{
- Guest = pVM->cpum.s.aGuestCpuIdStd[i];
- ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ if (i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
+ {
+ Guest = pVM->cpum.s.aGuestCpuIdStd[i];
+ ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
- pHlp->pfnPrintf(pHlp,
- "Gst: %08x %08x %08x %08x %08x%s\n"
- "Hst: %08x %08x %08x %08x\n",
- i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
- i <= cStdMax ? "" : "*",
- Host.eax, Host.ebx, Host.ecx, Host.edx);
+ pHlp->pfnPrintf(pHlp,
+ "Gst: %08x %08x %08x %08x %08x%s\n"
+ "Hst: %08x %08x %08x %08x\n",
+ i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
+ i <= cStdMax ? "" : "*",
+ Host.eax, Host.ebx, Host.ecx, Host.edx);
+ }
+ else
+ {
+ ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+
+ pHlp->pfnPrintf(pHlp,
+ "Hst: %08x %08x %08x %08x %08x\n",
+ i, Host.eax, Host.ebx, Host.ecx, Host.edx);
+ }
}
/*
@@ -3411,12 +3858,12 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
if (uECX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " AVX");
if (uECX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " 29");
if (uECX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
- if (uECX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 31");
+ if (uECX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " HVP");
pHlp->pfnPrintf(pHlp, "\n");
}
else
{
- ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
@@ -3508,7 +3955,7 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
{
Guest = pVM->cpum.s.aGuestCpuIdExt[i];
- ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
pHlp->pfnPrintf(pHlp,
"Gst: %08x %08x %08x %08x %08x%s\n"
@@ -3605,7 +4052,7 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
}
else
{
- ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
uint32_t uEdxGst = Guest.edx;
uint32_t uEdxHst = Host.edx;
@@ -3800,7 +4247,7 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
{
Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
- ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
pHlp->pfnPrintf(pHlp,
"Gst: %08x %08x %08x %08x %08x%s\n"
@@ -3823,8 +4270,8 @@ static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
if (iVerbosity && cCentaurMax >= 1)
{
- ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
- uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdExt[1].edx;
+ ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+ uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
uint32_t uEdxHst = Host.edx;
if (iVerbosity == 1)
@@ -3941,8 +4388,8 @@ static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInst
/* translate the address */
pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
- if ( MMHyperIsInsideArea(pState->pVM, pState->pvPageGC)
- && !HWACCMIsEnabled(pState->pVM))
+ if ( !HMIsEnabled(pState->pVM)
+ && MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
{
pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
if (!pState->pvPageR3)
@@ -4033,7 +4480,9 @@ VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPT
{
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
{
+# ifdef VBOX_WITH_RAW_MODE_NOT_R0
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->cs);
+# endif
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
}
@@ -4172,152 +4621,6 @@ VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM)
/**
- * Transforms the guest CPU state to raw-ring mode.
- *
- * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
- *
- * @returns VBox status. (recompiler failure)
- * @param pVCpu Pointer to the VMCPU.
- * @param pCtxCore The context core (for trap usage).
- * @see @ref pg_raw
- */
-VMMR3DECL(int) CPUMR3RawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
-{
- PVM pVM = pVCpu->CTX_SUFF(pVM);
-
- Assert(!pVCpu->cpum.s.fRawEntered);
- Assert(!pVCpu->cpum.s.fRemEntered);
- if (!pCtxCore)
- pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
-
- /*
- * Are we in Ring-0?
- */
- if ( pCtxCore->ss.Sel && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
- && !pCtxCore->eflags.Bits.u1VM)
- {
- /*
- * Enter execution mode.
- */
- PATMRawEnter(pVM, pCtxCore);
-
- /*
- * Set CPL to Ring-1.
- */
- pCtxCore->ss.Sel |= 1;
- if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
- pCtxCore->cs.Sel |= 1;
- }
- else
- {
- AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
- ("ring-1 code not supported\n"));
- /*
- * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
- */
- PATMRawEnter(pVM, pCtxCore);
- }
-
- /*
- * Assert sanity.
- */
- AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
- AssertReleaseMsg( pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL)
- || pCtxCore->eflags.Bits.u1VM,
- ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
- Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
-
- pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
-
- pVCpu->cpum.s.fRawEntered = true;
- return VINF_SUCCESS;
-}
-
-
-/**
- * Transforms the guest CPU state from raw-ring mode to correct values.
- *
- * This function will change any selector registers with DPL=1 to DPL=0.
- *
- * @returns Adjusted rc.
- * @param pVCpu Pointer to the VMCPU.
- * @param rc Raw mode return code
- * @param pCtxCore The context core (for trap usage).
- * @see @ref pg_raw
- */
-VMMR3DECL(int) CPUMR3RawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
-{
- PVM pVM = pVCpu->CTX_SUFF(pVM);
-
- /*
- * Don't leave if we've already left (in GC).
- */
- Assert(pVCpu->cpum.s.fRawEntered);
- Assert(!pVCpu->cpum.s.fRemEntered);
- if (!pVCpu->cpum.s.fRawEntered)
- return rc;
- pVCpu->cpum.s.fRawEntered = false;
-
- PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
- if (!pCtxCore)
- pCtxCore = CPUMCTX2CORE(pCtx);
- Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss.Sel & X86_SEL_RPL));
- AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL),
- ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
-
- /*
- * Are we executing in raw ring-1?
- */
- if ( (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
- && !pCtxCore->eflags.Bits.u1VM)
- {
- /*
- * Leave execution mode.
- */
- PATMRawLeave(pVM, pCtxCore, rc);
- /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
- /** @todo See what happens if we remove this. */
- if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
- pCtxCore->ds.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
- pCtxCore->es.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
- pCtxCore->fs.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
- pCtxCore->gs.Sel &= ~X86_SEL_RPL;
-
- /*
- * Ring-1 selector => Ring-0.
- */
- pCtxCore->ss.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
- pCtxCore->cs.Sel &= ~X86_SEL_RPL;
- }
- else
- {
- /*
- * PATM is taking care of the IOPL and IF flags for us.
- */
- PATMRawLeave(pVM, pCtxCore, rc);
- if (!pCtxCore->eflags.Bits.u1VM)
- {
- /** @todo See what happens if we remove this. */
- if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
- pCtxCore->ds.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
- pCtxCore->es.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
- pCtxCore->fs.Sel &= ~X86_SEL_RPL;
- if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
- pCtxCore->gs.Sel &= ~X86_SEL_RPL;
- }
- }
-
- return rc;
-}
-
-
-/**
* Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
*
* Only REM should ever call this function!
@@ -4369,3 +4672,46 @@ VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
pVCpu->cpum.s.fRemEntered = false;
}
+
+/**
+ * Called when the ring-3 init phase completes.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ */
+VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM)
+{
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ /* Cache the APIC base (from the APIC device) once it has been initialized. */
+ PDMApicGetBase(&pVM->aCpus[i], &pVM->aCpus[i].cpum.s.Guest.msrApicBase);
+ Log(("CPUMR3InitCompleted pVM=%p APIC base[%u]=%RX64\n", pVM, (unsigned)i, pVM->aCpus[i].cpum.s.Guest.msrApicBase));
+ }
+ return VINF_SUCCESS;
+}
+
+/**
+ * Called when the ring-0 init phase completes.
+ *
+ * @param pVM Pointer to the VM.
+ */
+VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM)
+{
+ /*
+ * Log the cpuid.
+ */
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ RTCPUSET OnlineSet;
+ LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
+ (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
+ RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
+ RTCPUID cCores = RTMpGetCoreCount();
+ if (cCores)
+ LogRel(("Physical host cores: %u\n", (unsigned)cCores));
+ LogRel(("************************* CPUID dump ************************\n"));
+ DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
+ LogRel(("\n"));
+ DBGFR3_INFO_LOG(pVM, "cpuid", "verbose"); /* macro */
+ RTLogRelSetBuffering(fOldBuffered);
+ LogRel(("******************** End of CPUID dump **********************\n"));
+}
diff --git a/src/VBox/VMM/VMMR3/CPUMDbg.cpp b/src/VBox/VMM/VMMR3/CPUMDbg.cpp
index cf791ee2..cb6edc63 100644
--- a/src/VBox/VMM/VMMR3/CPUMDbg.cpp
+++ b/src/VBox/VMM/VMMR3/CPUMDbg.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2010-2011 Oracle Corporation
+ * Copyright (C) 2010-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -912,35 +912,85 @@ static DBGFREGSUBFIELD const g_aCpumRegFields_xmmN[] =
/** Sub-fields for the CR0 register. */
static DBGFREGSUBFIELD const g_aCpumRegFields_cr0[] =
{
- /** @todo */
+ DBGFREGSUBFIELD_RW("pe", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("mp", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("em", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("ts", 3, 1, 0),
+ DBGFREGSUBFIELD_RO("et", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("ne", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("wp", 16, 1, 0),
+ DBGFREGSUBFIELD_RW("am", 18, 1, 0),
+ DBGFREGSUBFIELD_RW("nw", 29, 1, 0),
+ DBGFREGSUBFIELD_RW("cd", 30, 1, 0),
+ DBGFREGSUBFIELD_RW("pg", 31, 1, 0),
DBGFREGSUBFIELD_TERMINATOR()
};
/** Sub-fields for the CR3 register. */
static DBGFREGSUBFIELD const g_aCpumRegFields_cr3[] =
{
- /** @todo */
+ DBGFREGSUBFIELD_RW("pwt", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("pcd", 4, 1, 0),
DBGFREGSUBFIELD_TERMINATOR()
};
/** Sub-fields for the CR4 register. */
static DBGFREGSUBFIELD const g_aCpumRegFields_cr4[] =
{
- /** @todo */
+ DBGFREGSUBFIELD_RW("vme", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("pvi", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("tsd", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("de", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("pse", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("pae", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("mce", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("pge", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("pce", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("osfsxr", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("osxmmeexcpt", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("vmxe", 10, 1, 0),
+ DBGFREGSUBFIELD_RW("smxe", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("pcide", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("osxsave", 17, 1, 0),
+ DBGFREGSUBFIELD_RW("smep", 18, 1, 0),
DBGFREGSUBFIELD_TERMINATOR()
};
/** Sub-fields for the DR6 register. */
static DBGFREGSUBFIELD const g_aCpumRegFields_dr6[] =
{
- /** @todo */
+ DBGFREGSUBFIELD_RW("b0", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("b1", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("b2", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("b3", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("bd", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("bs", 14, 1, 0),
+ DBGFREGSUBFIELD_RW("bt", 15, 1, 0),
DBGFREGSUBFIELD_TERMINATOR()
};
/** Sub-fields for the DR7 register. */
static DBGFREGSUBFIELD const g_aCpumRegFields_dr7[] =
{
- /** @todo */
+ DBGFREGSUBFIELD_RW("l0", 0, 1, 0),
+ DBGFREGSUBFIELD_RW("g0", 1, 1, 0),
+ DBGFREGSUBFIELD_RW("l1", 2, 1, 0),
+ DBGFREGSUBFIELD_RW("g1", 3, 1, 0),
+ DBGFREGSUBFIELD_RW("l2", 4, 1, 0),
+ DBGFREGSUBFIELD_RW("g2", 5, 1, 0),
+ DBGFREGSUBFIELD_RW("l3", 6, 1, 0),
+ DBGFREGSUBFIELD_RW("g3", 7, 1, 0),
+ DBGFREGSUBFIELD_RW("le", 8, 1, 0),
+ DBGFREGSUBFIELD_RW("ge", 9, 1, 0),
+ DBGFREGSUBFIELD_RW("gd", 13, 1, 0),
+ DBGFREGSUBFIELD_RW("rw0", 16, 2, 0),
+ DBGFREGSUBFIELD_RW("len0", 18, 2, 0),
+ DBGFREGSUBFIELD_RW("rw1", 20, 2, 0),
+ DBGFREGSUBFIELD_RW("len1", 22, 2, 0),
+ DBGFREGSUBFIELD_RW("rw2", 24, 2, 0),
+ DBGFREGSUBFIELD_RW("len2", 26, 2, 0),
+ DBGFREGSUBFIELD_RW("rw3", 28, 2, 0),
+ DBGFREGSUBFIELD_RW("len3", 30, 2, 0),
DBGFREGSUBFIELD_TERMINATOR()
};
diff --git a/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
new file mode 100644
index 00000000..2e094bf7
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
@@ -0,0 +1,1355 @@
+/* $Id: CPUMR3CpuId.cpp $ */
+/** @file
+ * CPUM - CPU ID part.
+ */
+
+/*
+ * Copyright (C) 2013-2014 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/ctype.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+* Global Variables *
+*******************************************************************************/
+/**
+ * The intel pentium family.
+ */
+static const CPUMMICROARCH g_aenmIntelFamily06[] =
+{
+ /* [ 0(0x00)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro A-step (says sandpile.org). */
+ /* [ 1(0x01)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro */
+ /* [ 2(0x02)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [ 3(0x03)] = */ kCpumMicroarch_Intel_P6_II, /* PII Klamath */
+ /* [ 4(0x04)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [ 5(0x05)] = */ kCpumMicroarch_Intel_P6_II, /* PII Deschutes */
+ /* [ 6(0x06)] = */ kCpumMicroarch_Intel_P6_II, /* Celeron Mendocino. */
+ /* [ 7(0x07)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Katmai. */
+ /* [ 8(0x08)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Coppermine (includes Celeron). */
+ /* [ 9(0x09)] = */ kCpumMicroarch_Intel_P6_M_Banias, /* Pentium/Celeron M Banias. */
+ /* [10(0x0a)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Xeon */
+ /* [11(0x0b)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Tualatin (includes Celeron). */
+ /* [12(0x0c)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [13(0x0d)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Pentium/Celeron M Dothan. */
+ /* [14(0x0e)] = */ kCpumMicroarch_Intel_Core_Yonah, /* Core Yonah (Enhanced Pentium M). */
+ /* [15(0x0f)] = */ kCpumMicroarch_Intel_Core2_Merom, /* Merom */
+ /* [16(0x10)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [17(0x11)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [18(0x12)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [19(0x13)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [20(0x14)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [21(0x15)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Tolapai - System-on-a-chip. */
+ /* [22(0x16)] = */ kCpumMicroarch_Intel_Core2_Merom,
+ /* [23(0x17)] = */ kCpumMicroarch_Intel_Core2_Penryn,
+ /* [24(0x18)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [25(0x19)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem,
+ /* [27(0x1b)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [28(0x1c)] = */ kCpumMicroarch_Intel_Atom_Bonnell, /* Diamonville, Pineview, */
+ /* [29(0x1d)] = */ kCpumMicroarch_Intel_Core2_Penryn,
+ /* [30(0x1e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Clarksfield, Lynnfield, Jasper Forest. */
+ /* [31(0x1f)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Only listed by sandpile.org. 2 cores ABD/HVD, whatever that means. */
+ /* [32(0x20)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [33(0x21)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [34(0x22)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [35(0x23)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [36(0x24)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [37(0x25)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Arrandale, Clarksdale. */
+ /* [38(0x26)] = */ kCpumMicroarch_Intel_Atom_Lincroft,
+ /* [39(0x27)] = */ kCpumMicroarch_Intel_Atom_Saltwell,
+ /* [40(0x28)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [41(0x29)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [42(0x2a)] = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+ /* [43(0x2b)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [44(0x2c)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Gulftown, Westmere-EP. */
+ /* [45(0x2d)] = */ kCpumMicroarch_Intel_Core7_SandyBridge, /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP. */
+ /* [46(0x2e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Beckton (Xeon). */
+ /* [47(0x2f)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Westmere-EX. */
+ /* [48(0x30)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [49(0x31)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [50(0x32)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [51(0x33)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [52(0x34)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [53(0x35)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* ?? */
+ /* [54(0x36)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* Cedarview, ++ */
+ /* [55(0x37)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+ /* [56(0x38)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [57(0x39)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [58(0x3a)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /* [59(0x3b)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [60(0x3c)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+ /* [61(0x3d)] = */ kCpumMicroarch_Intel_Core7_Broadwell,
+ /* [62(0x3e)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /* [63(0x3f)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+ /* [64(0x40)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [65(0x41)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [66(0x42)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [67(0x43)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [68(0x44)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [69(0x45)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+ /* [70(0x46)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+ /* [71(0x47)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [72(0x48)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [73(0x49)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [74(0x4a)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+ /* [75(0x4b)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [76(0x4c)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [77(0x4d)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+ /* [78(0x4e)] = */ kCpumMicroarch_Intel_Unknown,
+ /* [79(0x4f)] = */ kCpumMicroarch_Intel_Unknown,
+};
+
+
+
+/**
+ * Figures out the (sub-)micro architecture given a bit of CPUID info.
+ *
+ * @returns Micro architecture.
+ * @param enmVendor The CPU vendor.
+ * @param bFamily The CPU family.
+ * @param bModel The CPU model.
+ * @param bStepping The CPU stepping.
+ */
+VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
+ uint8_t bModel, uint8_t bStepping)
+{
+ if (enmVendor == CPUMCPUVENDOR_AMD)
+ {
+ switch (bFamily)
+ {
+ case 0x02: return kCpumMicroarch_AMD_Am286; /* Not really kosher... */
+ case 0x03: return kCpumMicroarch_AMD_Am386;
+ case 0x23: return kCpumMicroarch_AMD_Am386; /* SX*/
+ case 0x04: return bModel < 14 ? kCpumMicroarch_AMD_Am486 : kCpumMicroarch_AMD_Am486Enh;
+ case 0x05: return bModel < 6 ? kCpumMicroarch_AMD_K5 : kCpumMicroarch_AMD_K6; /* Geode LX is 0x0a, lump it with K6. */
+ case 0x06:
+ switch (bModel)
+ {
+ case 0: kCpumMicroarch_AMD_K7_Palomino;
+ case 1: kCpumMicroarch_AMD_K7_Palomino;
+ case 2: kCpumMicroarch_AMD_K7_Palomino;
+ case 3: kCpumMicroarch_AMD_K7_Spitfire;
+ case 4: kCpumMicroarch_AMD_K7_Thunderbird;
+ case 6: kCpumMicroarch_AMD_K7_Palomino;
+ case 7: kCpumMicroarch_AMD_K7_Morgan;
+ case 8: kCpumMicroarch_AMD_K7_Thoroughbred;
+ case 10: kCpumMicroarch_AMD_K7_Barton; /* Thorton too. */
+ }
+ return kCpumMicroarch_AMD_K7_Unknown;
+ case 0x0f:
+ /*
+ * This family is a friggin mess. Trying my best to make some
+ * sense out of it. Too much happened in the 0x0f family to
+ * lump it all together as K8 (130nm->90nm->65nm, AMD-V, ++).
+ *
+ * Empirical CPUID.01h.EAX evidence from revision guides, wikipedia,
+ * cpu-world.com, and other places:
+ * - 130nm:
+ * - ClawHammer: F7A/SH-CG, F5A/-CG, F4A/-CG, F50/-B0, F48/-C0, F58/-C0,
+ * - SledgeHammer: F50/SH-B0, F48/-C0, F58/-C0, F4A/-CG, F5A/-CG, F7A/-CG, F51/-B3
+ * - Newcastle: FC0/DH-CG (errum #180: FE0/DH-CG), FF0/DH-CG
+ * - Dublin: FC0/-CG, FF0/-CG, F82/CH-CG, F4A/-CG, F48/SH-C0,
+ * - Odessa: FC0/DH-CG (errum #180: FE0/DH-CG)
+ * - Paris: FF0/DH-CG, FC0/DH-CG (errum #180: FE0/DH-CG),
+ * - 90nm:
+ * - Winchester: 10FF0/DH-D0, 20FF0/DH-E3.
+ * - Oakville: 10FC0/DH-D0.
+ * - Georgetown: 10FC0/DH-D0.
+ * - Sonora: 10FC0/DH-D0.
+ * - Venus: 20F71/SH-E4
+ * - Troy: 20F51/SH-E4
+ * - Athens: 20F51/SH-E4
+ * - San Diego: 20F71/SH-E4.
+ * - Lancaster: 20F42/SH-E5
+ * - Newark: 20F42/SH-E5.
+ * - Albany: 20FC2/DH-E6.
+ * - Roma: 20FC2/DH-E6.
+ * - Venice: 20FF0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6.
+ * - Palermo: 10FC0/DH-D0, 20FF0/DH-E3, 20FC0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6
+ * - 90nm introducing Dual core:
+ * - Denmark: 20F30/JH-E1, 20F32/JH-E6
+ * - Italy: 20F10/JH-E1, 20F12/JH-E6
+ * - Egypt: 20F10/JH-E1, 20F12/JH-E6
+ * - Toledo: 20F32/JH-E6, 30F72/DH-E6 (single code variant).
+ * - Manchester: 20FB1/BH-E4, 30FF2/BH-E4.
+ * - 90nm 2nd gen opteron ++, AMD-V introduced (might be missing in some cheaper models):
+ * - Santa Ana: 40F32/JH-F2, /-F3
+ * - Santa Rosa: 40F12/JH-F2, 40F13/JH-F3
+ * - Windsor: 40F32/JH-F2, 40F33/JH-F3, C0F13/JH-F3, 40FB2/BH-F2, ??20FB1/BH-E4??.
+ * - Manila: 50FF2/DH-F2, 40FF2/DH-F2
+ * - Orleans: 40FF2/DH-F2, 50FF2/DH-F2, 50FF3/DH-F3.
+ * - Keene: 40FC2/DH-F2.
+ * - Richmond: 40FC2/DH-F2
+ * - Taylor: 40F82/BH-F2
+ * - Trinidad: 40F82/BH-F2
+ *
+ * - 65nm:
+ * - Brisbane: 60FB1/BH-G1, 60FB2/BH-G2.
+ * - Tyler: 60F81/BH-G1, 60F82/BH-G2.
+ * - Sparta: 70FF1/DH-G1, 70FF2/DH-G2.
+ * - Lima: 70FF1/DH-G1, 70FF2/DH-G2.
+ * - Sherman: /-G1, 70FC2/DH-G2.
+ * - Huron: 70FF2/DH-G2.
+ */
+ if (bModel < 0x10)
+ return kCpumMicroarch_AMD_K8_130nm;
+ if (bModel >= 0x60 && bModel < 0x80)
+ return kCpumMicroarch_AMD_K8_65nm;
+ if (bModel >= 0x40)
+ return kCpumMicroarch_AMD_K8_90nm_AMDV;
+ switch (bModel)
+ {
+ case 0x21:
+ case 0x23:
+ case 0x2b:
+ case 0x2f:
+ case 0x37:
+ case 0x3f:
+ return kCpumMicroarch_AMD_K8_90nm_DualCore;
+ }
+ return kCpumMicroarch_AMD_K8_90nm;
+ case 0x10:
+ return kCpumMicroarch_AMD_K10;
+ case 0x11:
+ return kCpumMicroarch_AMD_K10_Lion;
+ case 0x12:
+ return kCpumMicroarch_AMD_K10_Llano;
+ case 0x14:
+ return kCpumMicroarch_AMD_Bobcat;
+ case 0x15:
+ switch (bModel)
+ {
+ case 0x00: return kCpumMicroarch_AMD_15h_Bulldozer; /* Any? prerelease? */
+ case 0x01: return kCpumMicroarch_AMD_15h_Bulldozer; /* Opteron 4200, FX-81xx. */
+ case 0x02: return kCpumMicroarch_AMD_15h_Piledriver; /* Opteron 4300, FX-83xx. */
+ case 0x10: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-5800K for e.g. */
+ case 0x11: /* ?? */
+ case 0x12: /* ?? */
+ case 0x13: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-6800K for e.g. */
+ }
+ return kCpumMicroarch_AMD_15h_Unknown;
+ case 0x16:
+ return kCpumMicroarch_AMD_Jaguar;
+
+ }
+ return kCpumMicroarch_AMD_Unknown;
+ }
+
+ if (enmVendor == CPUMCPUVENDOR_INTEL)
+ {
+ switch (bFamily)
+ {
+ case 3:
+ return kCpumMicroarch_Intel_80386;
+ case 4:
+ return kCpumMicroarch_Intel_80486;
+ case 5:
+ return kCpumMicroarch_Intel_P5;
+ case 6:
+ if (bModel < RT_ELEMENTS(g_aenmIntelFamily06))
+ return g_aenmIntelFamily06[bModel];
+ return kCpumMicroarch_Intel_Atom_Unknown;
+ case 15:
+ switch (bModel)
+ {
+ case 0: return kCpumMicroarch_Intel_NB_Willamette;
+ case 1: return kCpumMicroarch_Intel_NB_Willamette;
+ case 2: return kCpumMicroarch_Intel_NB_Northwood;
+ case 3: return kCpumMicroarch_Intel_NB_Prescott;
+ case 4: return kCpumMicroarch_Intel_NB_Prescott2M; /* ?? */
+ case 5: return kCpumMicroarch_Intel_NB_Unknown; /*??*/
+ case 6: return kCpumMicroarch_Intel_NB_CedarMill;
+ case 7: return kCpumMicroarch_Intel_NB_Gallatin;
+ default: return kCpumMicroarch_Intel_NB_Unknown;
+ }
+ break;
+ /* The following are not kosher but kind of follow intuitively from 6, 5 & 4. */
+ case 1:
+ return kCpumMicroarch_Intel_8086;
+ case 2:
+ return kCpumMicroarch_Intel_80286;
+ }
+ return kCpumMicroarch_Intel_Unknown;
+ }
+
+ if (enmVendor == CPUMCPUVENDOR_VIA)
+ {
+ switch (bFamily)
+ {
+ case 5:
+ switch (bModel)
+ {
+ case 1: return kCpumMicroarch_Centaur_C6;
+ case 4: return kCpumMicroarch_Centaur_C6;
+ case 8: return kCpumMicroarch_Centaur_C2;
+ case 9: return kCpumMicroarch_Centaur_C3;
+ }
+ break;
+
+ case 6:
+ switch (bModel)
+ {
+ case 5: return kCpumMicroarch_VIA_C3_M2;
+ case 6: return kCpumMicroarch_VIA_C3_C5A;
+ case 7: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5B : kCpumMicroarch_VIA_C3_C5C;
+ case 8: return kCpumMicroarch_VIA_C3_C5N;
+ case 9: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5XL : kCpumMicroarch_VIA_C3_C5P;
+ case 10: return kCpumMicroarch_VIA_C7_C5J;
+ case 15: return kCpumMicroarch_VIA_Isaiah;
+ }
+ break;
+ }
+ return kCpumMicroarch_VIA_Unknown;
+ }
+
+ if (enmVendor == CPUMCPUVENDOR_CYRIX)
+ {
+ switch (bFamily)
+ {
+ case 4:
+ switch (bModel)
+ {
+ case 9: return kCpumMicroarch_Cyrix_5x86;
+ }
+ break;
+
+ case 5:
+ switch (bModel)
+ {
+ case 2: return kCpumMicroarch_Cyrix_M1;
+ case 4: return kCpumMicroarch_Cyrix_MediaGX;
+ case 5: return kCpumMicroarch_Cyrix_MediaGXm;
+ }
+ break;
+
+ case 6:
+ switch (bModel)
+ {
+ case 0: return kCpumMicroarch_Cyrix_M2;
+ }
+ break;
+
+ }
+ return kCpumMicroarch_Cyrix_Unknown;
+ }
+
+ return kCpumMicroarch_Unknown;
+}
+
+
+/**
+ * Translates a microarchitecture enum value to the corresponding string
+ * constant.
+ *
+ * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
+ * NULL if the value is invalid.
+ *
+ * @param enmMicroarch The enum value to convert.
+ */
+VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch)
+{
+ switch (enmMicroarch)
+ {
+#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
+ CASE_RET_STR(kCpumMicroarch_Intel_8086);
+ CASE_RET_STR(kCpumMicroarch_Intel_80186);
+ CASE_RET_STR(kCpumMicroarch_Intel_80286);
+ CASE_RET_STR(kCpumMicroarch_Intel_80386);
+ CASE_RET_STR(kCpumMicroarch_Intel_80486);
+ CASE_RET_STR(kCpumMicroarch_Intel_P5);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_P6);
+ CASE_RET_STR(kCpumMicroarch_Intel_P6_II);
+ CASE_RET_STR(kCpumMicroarch_Intel_P6_III);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Banias);
+ CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Dothan);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core_Yonah);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_Core2_Merom);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core2_Penryn);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Nehalem);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Westmere);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_SandyBridge);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_IvyBridge);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Haswell);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Broadwell);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Skylake);
+ CASE_RET_STR(kCpumMicroarch_Intel_Core7_Cannonlake);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Bonnell);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Lincroft);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Saltwell);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Silvermont);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Airmount);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Goldmont);
+ CASE_RET_STR(kCpumMicroarch_Intel_Atom_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Willamette);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Northwood);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott2M);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_CedarMill);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Gallatin);
+ CASE_RET_STR(kCpumMicroarch_Intel_NB_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_Intel_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_Am286);
+ CASE_RET_STR(kCpumMicroarch_AMD_Am386);
+ CASE_RET_STR(kCpumMicroarch_AMD_Am486);
+ CASE_RET_STR(kCpumMicroarch_AMD_Am486Enh);
+ CASE_RET_STR(kCpumMicroarch_AMD_K5);
+ CASE_RET_STR(kCpumMicroarch_AMD_K6);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Palomino);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Spitfire);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Thunderbird);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Morgan);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Thoroughbred);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Barton);
+ CASE_RET_STR(kCpumMicroarch_AMD_K7_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_K8_130nm);
+ CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm);
+ CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_DualCore);
+ CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_AMDV);
+ CASE_RET_STR(kCpumMicroarch_AMD_K8_65nm);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_K10);
+ CASE_RET_STR(kCpumMicroarch_AMD_K10_Lion);
+ CASE_RET_STR(kCpumMicroarch_AMD_K10_Llano);
+ CASE_RET_STR(kCpumMicroarch_AMD_Bobcat);
+ CASE_RET_STR(kCpumMicroarch_AMD_Jaguar);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_15h_Bulldozer);
+ CASE_RET_STR(kCpumMicroarch_AMD_15h_Piledriver);
+ CASE_RET_STR(kCpumMicroarch_AMD_15h_Steamroller);
+ CASE_RET_STR(kCpumMicroarch_AMD_15h_Excavator);
+ CASE_RET_STR(kCpumMicroarch_AMD_15h_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_16h_First);
+
+ CASE_RET_STR(kCpumMicroarch_AMD_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_Centaur_C6);
+ CASE_RET_STR(kCpumMicroarch_Centaur_C2);
+ CASE_RET_STR(kCpumMicroarch_Centaur_C3);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_M2);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5A);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5B);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5C);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5N);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5XL);
+ CASE_RET_STR(kCpumMicroarch_VIA_C3_C5P);
+ CASE_RET_STR(kCpumMicroarch_VIA_C7_C5J);
+ CASE_RET_STR(kCpumMicroarch_VIA_Isaiah);
+ CASE_RET_STR(kCpumMicroarch_VIA_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_Cyrix_5x86);
+ CASE_RET_STR(kCpumMicroarch_Cyrix_M1);
+ CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGX);
+ CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGXm);
+ CASE_RET_STR(kCpumMicroarch_Cyrix_M2);
+ CASE_RET_STR(kCpumMicroarch_Cyrix_Unknown);
+
+ CASE_RET_STR(kCpumMicroarch_Unknown);
+
+#undef CASE_RET_STR
+ case kCpumMicroarch_Invalid:
+ case kCpumMicroarch_Intel_End:
+ case kCpumMicroarch_Intel_Core7_End:
+ case kCpumMicroarch_Intel_Atom_End:
+ case kCpumMicroarch_Intel_P6_Core_Atom_End:
+ case kCpumMicroarch_Intel_NB_End:
+ case kCpumMicroarch_AMD_K7_End:
+ case kCpumMicroarch_AMD_K8_End:
+ case kCpumMicroarch_AMD_15h_End:
+ case kCpumMicroarch_AMD_16h_End:
+ case kCpumMicroarch_AMD_End:
+ case kCpumMicroarch_VIA_End:
+ case kCpumMicroarch_Cyrix_End:
+ case kCpumMicroarch_32BitHack:
+ break;
+ /* no default! */
+ }
+
+ return NULL;
+}
+
+
+
+/**
+ * Gets a matching leaf in the CPUID leaf array.
+ *
+ * @returns Pointer to the matching leaf, or NULL if not found.
+ * @param paLeaves The CPUID leaves to search. This is sorted.
+ * @param cLeaves The number of leaves in the array.
+ * @param uLeaf The leaf to locate.
+ * @param uSubLeaf The subleaf to locate. Pass 0 if no subleaves.
+ */
+PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
+{
+ /* Lazy bird does linear lookup here since this is only used for the
+ occasional CPUID overrides. */
+ for (uint32_t i = 0; i < cLeaves; i++)
+ if ( paLeaves[i].uLeaf == uLeaf
+ && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
+ return &paLeaves[i];
+ return NULL;
+}
+
+
+/**
+ * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
+ *
+ * @returns true if found, false if not.
+ * @param   paLeaves    The CPUID leaves to search.  This is sorted.
+ * @param   cLeaves     The number of leaves in the array.
+ * @param   uLeaf       The leaf to locate.
+ * @param   uSubLeaf    The subleaf to locate.  Pass 0 if no subleaves.
+ * @param   pLegacy     The legacy output leaf.
+ */
+bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf, PCPUMCPUID pLegacy)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, uLeaf, uSubLeaf);
+    if (pLeaf)
+    {
+        /* Copy the four register values into the legacy layout. */
+        pLegacy->eax = pLeaf->uEax;
+        pLegacy->ebx = pLeaf->uEbx;
+        pLegacy->ecx = pLeaf->uEcx;
+        pLegacy->edx = pLeaf->uEdx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Ensures that the CPUID leaf array can hold one more leaf.
+ *
+ * The array grows in chunks of 16 entries; RT_ALIGN of the current count
+ * recovers the current allocation size, so no separate capacity variable
+ * needs to be carried around.
+ *
+ * @returns Pointer to the CPUID leaf array (*ppaLeaves) on success.  NULL on
+ *          failure, in which case the old array has been freed and *ppaLeaves
+ *          set to NULL, so the caller need not clean up.
+ * @param   ppaLeaves       Pointer to the variable holding the array
+ *                          pointer (input/output).
+ * @param   cLeaves         The current array size.
+ */
+static PCPUMCPUIDLEAF cpumR3CpuIdEnsureSpace(PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves)
+{
+    uint32_t cAllocated = RT_ALIGN(cLeaves, 16);
+    if (cLeaves + 1 > cAllocated)
+    {
+        void *pvNew = RTMemRealloc(*ppaLeaves, (cAllocated + 16) * sizeof(**ppaLeaves));
+        if (!pvNew)
+        {
+            /* Out of memory: release the old block too, per the contract above. */
+            RTMemFree(*ppaLeaves);
+            *ppaLeaves = NULL;
+            return NULL;
+        }
+        *ppaLeaves = (PCPUMCPUIDLEAF)pvNew;
+    }
+    return *ppaLeaves;
+}
+
+
+/**
+ * Append a CPUID leaf or sub-leaf.
+ *
+ * ASSUMES linear insertion order, so we won't need to do any searching or
+ * replace anything.  Use cpumR3CpuIdInsert for those cases.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.  On error, *ppaLeaves is freed, so
+ *          the caller need do no more work.
+ * @param   ppaLeaves       Pointer to the pointer to the array of sorted
+ *                          CPUID leaves and sub-leaves.
+ * @param   pcLeaves        Where we keep the leaf count for *ppaLeaves.
+ * @param   uLeaf           The leaf we're adding.
+ * @param   uSubLeaf        The sub-leaf number.
+ * @param   fSubLeafMask    The sub-leaf mask.
+ * @param   uEax            The EAX value.
+ * @param   uEbx            The EBX value.
+ * @param   uEcx            The ECX value.
+ * @param   uEdx            The EDX value.
+ * @param   fFlags          The flags.
+ */
+static int cpumR3CollectCpuIdInfoAddOne(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves,
+                                        uint32_t uLeaf, uint32_t uSubLeaf, uint32_t fSubLeafMask,
+                                        uint32_t uEax, uint32_t uEbx, uint32_t uEcx, uint32_t uEdx, uint32_t fFlags)
+{
+    if (!cpumR3CpuIdEnsureSpace(ppaLeaves, *pcLeaves))
+        return VERR_NO_MEMORY;
+
+    /* Sanity: entries must arrive in strictly ascending (leaf, sub-leaf) order. */
+    PCPUMCPUIDLEAF pEntry = &(*ppaLeaves)[*pcLeaves];
+    Assert(   *pcLeaves == 0
+           || pEntry[-1].uLeaf < uLeaf
+           || (pEntry[-1].uLeaf == uLeaf && pEntry[-1].uSubLeaf < uSubLeaf) );
+
+    pEntry->uLeaf        = uLeaf;
+    pEntry->uSubLeaf     = uSubLeaf;
+    pEntry->fSubLeafMask = fSubLeafMask;
+    pEntry->uEax         = uEax;
+    pEntry->uEbx         = uEbx;
+    pEntry->uEcx         = uEcx;
+    pEntry->uEdx         = uEdx;
+    pEntry->fFlags       = fFlags;
+
+    (*pcLeaves)++;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Inserts a CPU ID leaf, replacing any existing ones.
+ *
+ * When inserting a simple leaf where we already got a series of subleaves with
+ * the same leaf number (eax), the simple leaf will replace the whole series.
+ *
+ * This ASSUMES that the leaf array is still on the normal heap and has only
+ * been allocated/reallocated by the cpumR3CpuIdEnsureSpace function.
+ *
+ * @returns VBox status code.
+ * @param   ppaLeaves     Pointer to the pointer to the array of sorted
+ *                        CPUID leaves and sub-leaves.
+ * @param   pcLeaves      Where we keep the leaf count for *ppaLeaves.
+ * @param   pNewLeaf      Pointer to the data of the new leaf we're about to
+ *                        insert.
+ */
+int cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
+{
+    PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
+    uint32_t       cLeaves  = *pcLeaves;
+
+    /*
+     * Validate the new leaf a little.
+     */
+    AssertReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED), VERR_INVALID_FLAGS);
+    AssertReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0, VERR_INVALID_PARAMETER);
+    AssertReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1), VERR_INVALID_PARAMETER);
+    AssertReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf, VERR_INVALID_PARAMETER);
+
+    /*
+     * Find insertion point.  The lazy bird uses the same excuse as in
+     * cpumR3CpuIdGetLeaf().
+     */
+    uint32_t i = 0;
+    while (   i < cLeaves
+           && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
+        i++;
+    if (   i < cLeaves
+        && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
+    {
+        if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
+        {
+            /*
+             * The subleaf mask differs, replace all existing leaves with the
+             * same leaf number.  (The series scan must compare uLeaf, not
+             * uSubLeaf, or the series is never detected.)
+             */
+            uint32_t c = 1;
+            while (   i + c < cLeaves
+                   && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf)
+                c++;
+            if (c > 1)
+            {
+                /* Shift the tail down over the surplus entries (note the
+                   dst/src order), and shrink the count even when the series
+                   runs to the very end of the array. */
+                if (i + c < cLeaves)
+                    memmove(&paLeaves[i + 1], &paLeaves[i + c], (cLeaves - i - c) * sizeof(paLeaves[0]));
+                *pcLeaves = cLeaves -= c - 1;
+            }
+
+            paLeaves[i] = *pNewLeaf;
+            return VINF_SUCCESS;
+        }
+
+        /* Find the sub-leaf insertion point, staying within this leaf number. */
+        while (   i < cLeaves
+               && paLeaves[i].uLeaf == pNewLeaf->uLeaf
+               && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf)
+            i++;
+
+        /*
+         * If we've got an exactly matching leaf, replace it.  (The bounds
+         * check guards against reading one element past the end.)
+         */
+        if (   i < cLeaves
+            && paLeaves[i].uLeaf == pNewLeaf->uLeaf
+            && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
+        {
+            paLeaves[i] = *pNewLeaf;
+            return VINF_SUCCESS;
+        }
+    }
+
+    /*
+     * Adding a new leaf at 'i'.
+     */
+    paLeaves = cpumR3CpuIdEnsureSpace(ppaLeaves, cLeaves);
+    if (!paLeaves)
+        return VERR_NO_MEMORY;
+
+    if (i < cLeaves)
+        memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
+    *pcLeaves += 1;
+    paLeaves[i] = *pNewLeaf;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Removes a range of CPUID leaves.
+ *
+ * This will not reallocate the array.
+ *
+ * @param   paLeaves        The array of sorted CPUID leaves and sub-leaves.
+ * @param   pcLeaves        Where we keep the leaf count for @a paLeaves.
+ * @param   uFirst          The first leaf.
+ * @param   uLast           The last leaf.
+ */
+void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
+{
+    uint32_t const cLeaves = *pcLeaves;
+    Assert(uFirst <= uLast);
+
+    /* Locate the first entry at or above uFirst. */
+    uint32_t iStart;
+    for (iStart = 0; iStart < cLeaves; iStart++)
+        if (paLeaves[iStart].uLeaf >= uFirst)
+            break;
+
+    /* Locate the first entry beyond uLast (i.e. last + 1). */
+    uint32_t iAfter;
+    for (iAfter = iStart; iAfter < cLeaves; iAfter++)
+        if (paLeaves[iAfter].uLeaf > uLast)
+            break;
+
+    /* Close the gap, if any, and update the count. */
+    if (iStart < iAfter)
+    {
+        if (iAfter < cLeaves)
+            memmove(&paLeaves[iStart], &paLeaves[iAfter], (cLeaves - iAfter) * sizeof(paLeaves[0]));
+        *pcLeaves = cLeaves - (iAfter - iStart);
+    }
+}
+
+
+
+/**
+ * Checks if ECX make a difference when reading a given CPUID leaf.
+ *
+ * Probes the host CPU via ASMCpuIdExSlow, so the result describes the CPU we
+ * are actually running on.  The sub-leaf count is heuristic; see the numbered
+ * termination rules in the body.
+ *
+ * @returns @c true if it does, @c false if it doesn't.
+ * @param   uLeaf               The leaf we're reading.
+ * @param   pcSubLeaves         Number of sub-leaves accessible via ECX.
+ * @param   pfFinalEcxUnchanged Whether ECX is passed thru when going beyond the
+ *                              final sub-leaf.
+ */
+static bool cpumR3IsEcxRelevantForCpuIdLeaf(uint32_t uLeaf, uint32_t *pcSubLeaves, bool *pfFinalEcxUnchanged)
+{
+    *pfFinalEcxUnchanged = false;
+
+    uint32_t auCur[4];
+    uint32_t auPrev[4];
+    ASMCpuIdExSlow(uLeaf, 0, 0, 0, &auPrev[0], &auPrev[1], &auPrev[2], &auPrev[3]);
+
+    /* Look for sub-leaves: scan ECX=1..63 for any output that differs from ECX=0. */
+    uint32_t uSubLeaf = 1;
+    for (;;)
+    {
+        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+        if (memcmp(auCur, auPrev, sizeof(auCur)))
+            break;
+
+        /* Advance / give up. */
+        uSubLeaf++;
+        if (uSubLeaf >= 64)
+        {
+            /* No difference found in 64 tries: ECX is irrelevant for this leaf. */
+            *pcSubLeaves = 1;
+            return false;
+        }
+    }
+
+    /* Count sub-leaves. */
+    uint32_t cRepeats = 0;
+    uSubLeaf = 0;
+    for (;;)
+    {
+        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+
+        /* Figuring out when to stop isn't entirely straight forward as we need
+           to cover undocumented behavior up to a point and implementation shortcuts. */
+
+        /* 1. Look for zero values.  (ECX equal to the input sub-leaf counts as
+              "zero" since some CPUs pass the input ECX through.) */
+        if (   auCur[0] == 0
+            && auCur[1] == 0
+            && (auCur[2] == 0 || auCur[2] == uSubLeaf)
+            && (auCur[3] == 0 || uLeaf == 0xb /* edx is fixed */) )
+            break;
+
+        /* 2. Look for more than 4 repeating value sets. */
+        if (   auCur[0] == auPrev[0]
+            && auCur[1] == auPrev[1]
+            && (    auCur[2] == auPrev[2]
+                || (   auCur[2]  == uSubLeaf
+                    && auPrev[2] == uSubLeaf - 1) )
+            && auCur[3] == auPrev[3])
+        {
+            cRepeats++;
+            if (cRepeats > 4)
+                break;
+        }
+        else
+            cRepeats = 0;
+
+        /* 3. Leaf 0xb level type 0 check.  (Bits 15:8 of EDX encode the level
+              type; two consecutive zeros terminate the topology enumeration —
+              NOTE(review): matches the documented x2APIC topology leaf, confirm.) */
+        if (   uLeaf == 0xb
+            && (auCur[3]  & 0xff00) == 0
+            && (auPrev[3] & 0xff00) == 0)
+            break;
+
+        /* 99. Give up. */
+        if (uSubLeaf >= 128)
+        {
+#ifndef IN_VBOX_CPU_REPORT
+            /* Ok, limit it according to the documentation if possible just to
+               avoid annoying users with these detection issues. */
+            uint32_t cDocLimit = UINT32_MAX;
+            if (uLeaf == 0x4)
+                cDocLimit = 4;
+            else if (uLeaf == 0x7)
+                cDocLimit = 1;
+            else if (uLeaf == 0xf)
+                cDocLimit = 2;
+            if (cDocLimit != UINT32_MAX)
+            {
+                *pfFinalEcxUnchanged = auCur[2] == uSubLeaf;
+                *pcSubLeaves = cDocLimit + 3;
+                return true;
+            }
+#endif
+            *pcSubLeaves = UINT32_MAX;
+            return true;
+        }
+
+        /* Advance. */
+        uSubLeaf++;
+        memcpy(auPrev, auCur, sizeof(auCur));
+    }
+
+    /* Standard exit.  Subtract the trailing repeats so the count reflects the
+       last distinct sub-leaf. */
+    *pfFinalEcxUnchanged = auCur[2] == uSubLeaf;
+    *pcSubLeaves = uSubLeaf + 1 - cRepeats;
+    return true;
+}
+
+
+/**
+ * Collects CPUID leaves and sub-leaves, returning a sorted array of them.
+ *
+ * Probes the host CPU directly (ASMCpuIdExSlow), walking each known leaf
+ * range and enumerating sub-leaves where ECX matters.
+ *
+ * @returns VBox status code.
+ * @param   ppaLeaves           Where to return the array pointer on success.
+ *                              Use RTMemFree to release.
+ * @param   pcLeaves            Where to return the size of the array on
+ *                              success.
+ */
+VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+    *ppaLeaves = NULL;
+    *pcLeaves = 0;
+
+    /*
+     * Try out various candidates.  This must be sorted!
+     */
+    static struct { uint32_t uMsr; bool fSpecial; } const s_aCandidates[] =
+    {
+        { UINT32_C(0x00000000), false },
+        { UINT32_C(0x10000000), false },
+        { UINT32_C(0x20000000), false },
+        { UINT32_C(0x30000000), false },
+        { UINT32_C(0x40000000), false },
+        { UINT32_C(0x50000000), false },
+        { UINT32_C(0x60000000), false },
+        { UINT32_C(0x70000000), false },
+        { UINT32_C(0x80000000), false },
+        { UINT32_C(0x80860000), false },
+        { UINT32_C(0x8ffffffe), true  },
+        { UINT32_C(0x8fffffff), true  },
+        { UINT32_C(0x90000000), false },
+        { UINT32_C(0xa0000000), false },
+        { UINT32_C(0xb0000000), false },
+        { UINT32_C(0xc0000000), false },
+        { UINT32_C(0xd0000000), false },
+        { UINT32_C(0xe0000000), false },
+        { UINT32_C(0xf0000000), false },
+    };
+
+    for (uint32_t iOuter = 0; iOuter < RT_ELEMENTS(s_aCandidates); iOuter++)
+    {
+        uint32_t uLeaf = s_aCandidates[iOuter].uMsr;
+        uint32_t uEax, uEbx, uEcx, uEdx;
+        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+
+        /*
+         * Does EAX look like a typical leaf count value?
+         */
+        if (   uEax         > uLeaf
+            && uEax - uLeaf < UINT32_C(0xff)) /* Adjust 0xff limit when exceeded by real HW. */
+        {
+            /* Yes, dump them. */
+            uint32_t cLeaves = uEax - uLeaf + 1;
+            while (cLeaves-- > 0)
+            {
+                /* Check three times here to reduce the chance of CPU migration
+                   resulting in false positives with things like the APIC ID. */
+                uint32_t cSubLeaves;
+                bool fFinalEcxUnchanged;
+                if (   cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
+                    && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
+                    && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged))
+                {
+                    if (cSubLeaves > 16)
+                    {
+                        /* This shouldn't happen.  But in case it does, file all
+                           relevant details in the release log. */
+                        LogRel(("CPUM: VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES! uLeaf=%#x cSubLeaves=%#x\n", uLeaf, cSubLeaves));
+                        LogRel(("------------------ dump of problematic subleaves ------------------\n"));
+                        for (uint32_t uSubLeaf = 0; uSubLeaf < 128; uSubLeaf++)
+                        {
+                            uint32_t auTmp[4];
+                            ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auTmp[0], &auTmp[1], &auTmp[2], &auTmp[3]);
+                            LogRel(("CPUM: %#010x, %#010x => %#010x %#010x %#010x %#010x\n",
+                                    uLeaf, uSubLeaf, auTmp[0], auTmp[1], auTmp[2], auTmp[3]));
+                        }
+                        LogRel(("----------------- dump of what we've found so far -----------------\n"));
+                        for (uint32_t i = 0 ; i < *pcLeaves; i++)
+                            LogRel(("CPUM: %#010x, %#010x/%#010x => %#010x %#010x %#010x %#010x\n",
+                                    (*ppaLeaves)[i].uLeaf, (*ppaLeaves)[i].uSubLeaf,  (*ppaLeaves)[i].fSubLeafMask,
+                                    (*ppaLeaves)[i].uEax, (*ppaLeaves)[i].uEbx, (*ppaLeaves)[i].uEcx, (*ppaLeaves)[i].uEdx));
+                        LogRel(("\nPlease create a defect on virtualbox.org and attach this log file!\n\n"));
+                        return VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES;
+                    }
+
+                    /* ECX-dependent leaf: record each sub-leaf individually. */
+                    for (uint32_t uSubLeaf = 0; uSubLeaf < cSubLeaves; uSubLeaf++)
+                    {
+                        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &uEax, &uEbx, &uEcx, &uEdx);
+                        int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                              uLeaf, uSubLeaf, UINT32_MAX, uEax, uEbx, uEcx, uEdx,
+                                                              uSubLeaf + 1 == cSubLeaves && fFinalEcxUnchanged
+                                                              ? CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED : 0);
+                        if (RT_FAILURE(rc))
+                            return rc;
+                    }
+                }
+                else
+                {
+                    /* Simple leaf: the triple-check disagreed or ECX is irrelevant. */
+                    ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+                    int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                          uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
+                    if (RT_FAILURE(rc))
+                        return rc;
+                }
+
+                /* next */
+                uLeaf++;
+            }
+        }
+        /*
+         * Special CPUIDs need special handling as they don't follow the
+         * leaf count principle used above.
+         */
+        else if (s_aCandidates[iOuter].fSpecial)
+        {
+            bool fKeep = false;
+            if (uLeaf == 0x8ffffffe && uEax == UINT32_C(0x00494544))
+                fKeep = true;
+            else if (   uLeaf == 0x8fffffff
+                     && RT_C_IS_PRINT(RT_BYTE1(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEdx)) )
+                fKeep = true;
+            if (fKeep)
+            {
+                int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                      uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
+                if (RT_FAILURE(rc))
+                    return rc;
+            }
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Determines the method the CPU uses to handle unknown CPUID leaves.
+ *
+ * Probes a set of out-of-range leaves on the host CPU and classifies the
+ * observed behavior.
+ *
+ * @returns VBox status code.
+ * @param   penmUnknownMethod   Where to return the method.
+ * @param   pDefUnknown         Where to return default unknown values.  This
+ *                              will be set, even if the resulting method
+ *                              doesn't actually needs it.
+ */
+VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
+{
+    uint32_t uLastStd = ASMCpuId_EAX(0);
+    uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
+    if (!ASMIsValidExtRange(uLastExt))
+        uLastExt = 0x80000000;
+
+    /* Out-of-range leaves to probe: a few past the last standard and extended
+       leaves, plus arbitrary values scattered through the range. */
+    uint32_t auChecks[] =
+    {
+        uLastStd + 1,
+        uLastStd + 5,
+        uLastStd + 8,
+        uLastStd + 32,
+        uLastStd + 251,
+        uLastExt + 1,
+        uLastExt + 8,
+        uLastExt + 15,
+        uLastExt + 63,
+        uLastExt + 255,
+        0x7fbbffcc,
+        0x833f7872,
+        0xefff2353,
+        0x35779456,
+        0x1ef6d33e,
+    };
+
+    /* Arbitrary register inputs used to test whether ECX/EBX/EDX influence
+       the result of an unknown leaf. */
+    static const uint32_t s_auValues[] =
+    {
+        0xa95d2156,
+        0x00000001,
+        0x00000002,
+        0x00000008,
+        0x00000000,
+        0x55773399,
+        0x93401769,
+        0x12039587,
+    };
+
+    /*
+     * Simple method, all zeros.
+     */
+    *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS;
+    pDefUnknown->eax = 0;
+    pDefUnknown->ebx = 0;
+    pDefUnknown->ecx = 0;
+    pDefUnknown->edx = 0;
+
+    /*
+     * Intel has been observed returning the last standard leaf.
+     */
+    uint32_t auLast[4];
+    ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
+
+    uint32_t cChecks = RT_ELEMENTS(auChecks);
+    while (cChecks > 0)
+    {
+        uint32_t auCur[4];
+        ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+        if (memcmp(auCur, auLast, sizeof(auCur)))
+            break;
+        cChecks--;
+    }
+    if (cChecks == 0)
+    {
+        /* Every probe matched the last standard leaf.  Now, what happens when
+           the input changes?  Esp. ECX. */
+        uint32_t cTotal       = 0;
+        uint32_t cSame        = 0;
+        uint32_t cLastWithEcx = 0;
+        uint32_t cNeither     = 0;
+        uint32_t cValues = RT_ELEMENTS(s_auValues);
+        while (cValues > 0)
+        {
+            uint32_t uValue = s_auValues[cValues - 1];
+            uint32_t auLastWithEcx[4];
+            ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
+                           &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
+
+            cChecks = RT_ELEMENTS(auChecks);
+            while (cChecks > 0)
+            {
+                uint32_t auCur[4];
+                ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+                if (!memcmp(auCur, auLast, sizeof(auCur)))
+                {
+                    cSame++;
+                    if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+                        cLastWithEcx++;
+                }
+                else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+                    cLastWithEcx++;
+                else
+                    cNeither++;
+                cTotal++;
+                cChecks--;
+            }
+            cValues--;
+        }
+
+        Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
+        if (cSame == cTotal)
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
+        else if (cLastWithEcx == cTotal)
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
+        else
+            /* Mixed results: fall back to the plain last-std-leaf method. */
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
+        pDefUnknown->eax = auLast[0];
+        pDefUnknown->ebx = auLast[1];
+        pDefUnknown->ecx = auLast[2];
+        pDefUnknown->edx = auLast[3];
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Unchanged register values?  (I.e. the CPU passes EAX..EDX through.)
+     */
+    cChecks = RT_ELEMENTS(auChecks);
+    while (cChecks > 0)
+    {
+        uint32_t const  uLeaf   = auChecks[cChecks - 1];
+        uint32_t        cValues = RT_ELEMENTS(s_auValues);
+        while (cValues > 0)
+        {
+            uint32_t uValue = s_auValues[cValues - 1];
+            uint32_t auCur[4];
+            ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+            if (   auCur[0] != uLeaf
+                || auCur[1] != uValue
+                || auCur[2] != uValue
+                || auCur[3] != uValue)
+                break;
+            cValues--;
+        }
+        if (cValues != 0)
+            break;
+        cChecks--;
+    }
+    if (cChecks == 0)
+    {
+        *penmUnknownMethod = CPUMUKNOWNCPUID_PASSTHRU;
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Just go with the simple method (all zeros, set above).
+     */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Translates an unknown CPUID leaf method into the constant name (sans prefix).
+ *
+ * @returns Read only name string.
+ * @param   enmUnknownMethod    The method to translate.
+ */
+VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUKNOWNCPUID enmUnknownMethod)
+{
+    switch (enmUnknownMethod)
+    {
+        case CPUMUKNOWNCPUID_DEFAULTS:                  return "DEFAULTS";
+        case CPUMUKNOWNCPUID_LAST_STD_LEAF:             return "LAST_STD_LEAF";
+        case CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:    return "LAST_STD_LEAF_WITH_ECX";
+        case CPUMUKNOWNCPUID_PASSTHRU:                  return "PASSTHRU";
+
+        /* Non-value members: fall thru to the error string. */
+        case CPUMUKNOWNCPUID_INVALID:
+        case CPUMUKNOWNCPUID_END:
+        case CPUMUKNOWNCPUID_32BIT_HACK:
+            break;
+    }
+    return "Invalid-unknown-CPUID-method";
+}
+
+
+/**
+ * Detects the CPU vendor given the CPUID(0) register values.
+ *
+ * @returns The vendor.
+ * @param   uEAX                EAX from CPUID(0).
+ * @param   uEBX                EBX from CPUID(0).
+ * @param   uECX                ECX from CPUID(0).
+ * @param   uEDX                EDX from CPUID(0).
+ */
+VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
+{
+    if (ASMIsValidStdRange(uEAX))
+    {
+        if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_AMD;
+
+        if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_INTEL;
+
+        if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_VIA;
+
+        /* The EBX:EDX:ECX bytes spell "CyrixInstead". */
+        if (   uEBX == UINT32_C(0x69727943) /* CyrixInstead */
+            && uECX == UINT32_C(0x64616574)
+            && uEDX == UINT32_C(0x736E4978))
+            return CPUMCPUVENDOR_CYRIX;
+
+        /* "Geode by NSC", example: family 5, model 9.  */
+
+        /** @todo detect the other buggers... */
+    }
+
+    return CPUMCPUVENDOR_UNKNOWN;
+}
+
+
+/**
+ * Translates a CPU vendor enum value into the corresponding string constant.
+ *
+ * The named can be prefixed with 'CPUMCPUVENDOR_' to construct a valid enum
+ * value name.  This can be useful when generating code.
+ *
+ * @returns Read only name string.
+ * @param   enmVendor           The CPU vendor value.
+ */
+VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor)
+{
+    switch (enmVendor)
+    {
+        case CPUMCPUVENDOR_INTEL:       return "INTEL";
+        case CPUMCPUVENDOR_AMD:         return "AMD";
+        case CPUMCPUVENDOR_VIA:         return "VIA";
+        case CPUMCPUVENDOR_CYRIX:       return "CYRIX";
+        case CPUMCPUVENDOR_UNKNOWN:     return "UNKNOWN";
+
+        /* Non-value members: fall thru to the error string. */
+        case CPUMCPUVENDOR_INVALID:
+        case CPUMCPUVENDOR_32BIT_HACK:
+            break;
+    }
+    return "Invalid-cpu-vendor";
+}
+
+
+/** Finds the first leaf with the given leaf number, returning NULL if absent. */
+static PCCPUMCPUIDLEAF cpumR3CpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
+{
+    /* Linear search; the arrays are small enough that binary search is not
+       worth the bother. */
+    for (uint32_t i = 0; i < cLeaves; i++)
+        if (paLeaves[i].uLeaf == uLeaf)
+            return &paLeaves[i];
+    return NULL;
+}
+
+
+/**
+ * Explodes a sorted CPUID leaf array into the CPUMFEATURES structure.
+ *
+ * Assumes leaf 0 and leaf 1 occupy the first two array slots (asserted).
+ *
+ * @returns VBox status code (VERR_CPUM_IPE_1 on malformed input).
+ * @param   paLeaves    The sorted CPUID leaf array (leaf 0 and 1 first).
+ * @param   cLeaves     Number of leaves; 0 is tolerated, 1 is not.
+ * @param   pFeatures   The feature structure to fill (zeroed first).
+ */
+int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures)
+{
+    RT_ZERO(*pFeatures);
+    if (cLeaves >= 2)
+    {
+        AssertLogRelReturn(paLeaves[0].uLeaf == 0, VERR_CPUM_IPE_1);
+        AssertLogRelReturn(paLeaves[1].uLeaf == 1, VERR_CPUM_IPE_1);
+
+        /* Vendor and family/model/stepping from leaves 0 and 1. */
+        pFeatures->enmCpuVendor = CPUMR3CpuIdDetectVendorEx(paLeaves[0].uEax,
+                                                            paLeaves[0].uEbx,
+                                                            paLeaves[0].uEcx,
+                                                            paLeaves[0].uEdx);
+        pFeatures->uFamily      = ASMGetCpuFamily(paLeaves[1].uEax);
+        pFeatures->uModel       = ASMGetCpuModel(paLeaves[1].uEax, pFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL);
+        pFeatures->uStepping    = ASMGetCpuStepping(paLeaves[1].uEax);
+        pFeatures->enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx((CPUMCPUVENDOR)pFeatures->enmCpuVendor,
+                                                                  pFeatures->uFamily,
+                                                                  pFeatures->uModel,
+                                                                  pFeatures->uStepping);
+
+        /* Physical address width: leaf 0x80000008 when present, else infer
+           36 bits from PSE36 or default to 32. */
+        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
+        if (pLeaf)
+            pFeatures->cMaxPhysAddrWidth = pLeaf->uEax & 0xff;
+        else if (paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36)
+            pFeatures->cMaxPhysAddrWidth = 36;
+        else
+            pFeatures->cMaxPhysAddrWidth = 32;
+
+        /* Standard features. */
+        pFeatures->fMsr                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_MSR);
+        pFeatures->fApic                = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_APIC);
+        pFeatures->fX2Apic              = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_X2APIC);
+        pFeatures->fPse                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE);
+        pFeatures->fPse36               = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36);
+        pFeatures->fPae                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAE);
+        pFeatures->fPat                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAT);
+        pFeatures->fFxSaveRstor         = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_FXSR);
+        pFeatures->fSysEnter            = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_SEP);
+        pFeatures->fHypervisorPresent   = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_HVP);
+        pFeatures->fMonitorMWait        = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
+
+        /* Extended features. */
+        PCCPUMCPUIDLEAF const pExtLeaf  = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000001);
+        if (pExtLeaf)
+        {
+            pFeatures->fLongMode        = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
+            pFeatures->fSysCall         = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
+            pFeatures->fNoExecute       = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX);
+            pFeatures->fLahfSahf        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+            pFeatures->fRdTscP          = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        }
+
+        if (   pExtLeaf
+            && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD)
+        {
+            /* AMD features.  AMD mirrors some standard bits in the extended
+               leaf, so OR them in. */
+            pFeatures->fMsr            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MSR);
+            pFeatures->fApic           |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC);
+            pFeatures->fPse            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE);
+            pFeatures->fPse36          |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE36);
+            pFeatures->fPae            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAE);
+            pFeatures->fPat            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAT);
+            pFeatures->fFxSaveRstor    |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FXSR);
+        }
+
+        /*
+         * Quirks.
+         */
+        pFeatures->fLeakyFxSR = pExtLeaf
+                             && (pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+                             && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD
+                             && pFeatures->uFamily >= 6 /* K7 and up */;
+    }
+    else
+        AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
+    return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/CPUMR3Db.cpp b/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
new file mode 100644
index 00000000..26cfb857
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
@@ -0,0 +1,780 @@
+/* $Id: CPUMR3Db.cpp $ */
+/** @file
+ * CPUM - CPU database part.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+* Structures and Typedefs *
+*******************************************************************************/
+/** A CPU database entry describing one concrete processor model. */
+typedef struct CPUMDBENTRY
+{
+    /** The CPU name. */
+    const char     *pszName;
+    /** The full CPU name. */
+    const char     *pszFullName;
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t         enmVendor;
+    /** The CPU family. */
+    uint8_t         uFamily;
+    /** The CPU model. */
+    uint8_t         uModel;
+    /** The CPU stepping. */
+    uint8_t         uStepping;
+    /** The microarchitecture. */
+    CPUMMICROARCH   enmMicroarch;
+    /** Scalable bus frequency used for reporting other frequencies. */
+    uint64_t        uScalableBusFreq;
+    /** Flags (TBD). */
+    uint32_t        fFlags;
+    /** The maximum physical address width of the CPU.  This should correspond to
+     * the value in CPUID leaf 0x80000008 when present. */
+    uint8_t         cMaxPhysAddrWidth;
+    /** Pointer to an array of CPUID leaves.  */
+    PCCPUMCPUIDLEAF paCpuIdLeaves;
+    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
+    uint32_t        cCpuIdLeaves;
+    /** The method used to deal with unknown CPUID leaves. */
+    CPUMUKNOWNCPUID enmUnknownCpuId;
+    /** The default unknown CPUID value. */
+    CPUMCPUID       DefUnknownCpuId;
+
+    /** MSR mask.  Several microarchitectures ignore the higher bits of the MSR
+     * index, presumably masking it with this value — NOTE(review): the original
+     * comment was left unfinished here; confirm the exact semantics. */
+    uint32_t        fMsrMask;
+
+    /** The number of ranges in the table pointed to by paMsrRanges. */
+    uint32_t        cMsrRanges;
+    /** MSR ranges for this CPU. */
+    PCCPUMMSRRANGE  paMsrRanges;
+} CPUMDBENTRY;
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+
+/** @def NULL_ALONE
+ * For eliminating an unnecessary data dependency in standalone builds (for
+ * VBoxSVC). */
+/** @def ZERO_ALONE
+ * For eliminating an unnecessary data size dependency in standalone builds (for
+ * VBoxSVC). */
+#ifndef CPUM_DB_STANDALONE
+# define NULL_ALONE(a_aTable) a_aTable
+# define ZERO_ALONE(a_cTable) a_cTable
+#else
+# define NULL_ALONE(a_aTable) NULL
+# define ZERO_ALONE(a_cTable) 0
+#endif
+
+
+/** @name Short macros for the MSR range entries.
+ *
+ * These are rather cryptic, but this is to reduce the attack on the right
+ * margin.
+ *
+ * @{ */
+/** Alias one MSR onto another (a_uTarget). */
+#define MAL(a_uMsr, a_szName, a_uTarget) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
+/** Functions handles everything. */
+#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
+/** Functions handles everything, with GP mask. */
+#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
+/** Function handlers, read-only. */
+#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
+/** Function handlers, ignore all writes. */
+#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
+/** Function handlers, with value. */
+#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
+/** Function handlers, with write ignore mask. */
+#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
+/** Function handlers, extended version. */
+#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** Function handlers, with CPUMCPU storage variable. */
+#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+ RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
+/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
+#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
+ RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+ RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value: RDMSR returns a_uValue, every WRMSR raises \#GP
 *  (the GP mask covers all 64 bits). */
#define MVO(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, silently ignores all writes
 *  (the ignore mask covers all 64 bits). */
#define MVI(a_uMsr, a_szName, a_uValue) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value; writes touching bits in a_fWrGpMask raise \#GP, the rest
 *  are ignored. */
#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks
 *  specified by the caller. */
#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing (a_offCpumCpu is 0). */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Range: the read/write functions handle everything (no value/masks). */
#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: read fixed value, read-only (all writes raise \#GP). */
#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: read fixed value, ignore all writes. */
#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: the short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)

/** Internal form used by the macros above; the statistics build adds four
 *  zero-initialized counter members at the end of CPUMMSRRANGE. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
      { 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */
+
+
+#include "cpus/Intel_Core_i7_3960X.h"
+#include "cpus/Intel_Core_i5_3570.h"
+#include "cpus/Intel_Core_i7_2635QM.h"
+#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
+#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
+#include "cpus/Intel_Pentium_4_3_00GHz.h"
+
+#include "cpus/AMD_FX_8150_Eight_Core.h"
+#include "cpus/AMD_Phenom_II_X6_1100T.h"
+#include "cpus/Quad_Core_AMD_Opteron_2384.h"
+#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
+#include "cpus/AMD_Athlon_64_3200.h"
+
+#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"
+
+
+
+/**
+ * The database entries.
+ *
+ * 1. The first entry is special. It is the fallback for unknown
+ * processors. Thus, it better be pretty representative.
+ *
+ * 2. The first entry for a CPU vendor is likewise important as it is
+ * the default entry for that vendor.
+ *
+ * Generally we put the most recent CPUs first, since these tend to have the
+ * most complicated and backwards compatible list of MSRs.
+ */
+static CPUMDBENTRY const * const g_apCpumDbEntries[] =
+{
+#ifdef VBOX_CPUDB_Intel_Core_i5_3570
+ &g_Entry_Intel_Core_i5_3570,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
+ &g_Entry_Intel_Core_i7_3960X,
+#endif
+#ifdef VBOX_CPUDB_Intel_Core_i7_2635QM
+ &g_Entry_Intel_Core_i7_2635QM,
+#endif
+#ifdef Intel_Pentium_M_processor_2_00GHz
+ &g_Entry_Intel_Pentium_M_processor_2_00GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
+ &g_Entry_Intel_Xeon_X5482_3_20GHz,
+#endif
+#ifdef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
+ &g_Entry_Intel_Pentium_4_3_00GHz,
+#endif
+
+#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
+ &g_Entry_AMD_FX_8150_Eight_Core,
+#endif
+#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
+ &g_Entry_AMD_Phenom_II_X6_1100T,
+#endif
+#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
+ &g_Entry_Quad_Core_AMD_Opteron_2384,
+#endif
+#ifdef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
+ &g_Entry_AMD_Athlon_64_X2_Dual_Core_4200,
+#endif
+#ifdef VBOX_CPUDB_AMD_Athlon_64_3200
+ &g_Entry_AMD_Athlon_64_3200,
+#endif
+
+#ifdef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
+ &g_Entry_VIA_QuadCore_L4700_1_2_GHz,
+#endif
+};
+
+
+#ifndef CPUM_DB_STANDALONE
+
/**
 * Binary search used by cpumR3MsrRangesInsert and has some special properties
 * wrt to mismatches.
 *
 * If @a uMsr falls inside an existing range, the index of that range is
 * returned.  If it does not, the returned index is the position where a new
 * range containing @a uMsr should be inserted (i.e. the index of the first
 * range starting above @a uMsr, or cMsrRanges if it lies above all of them).
 *
 * @returns Insert location.
 * @param   paMsrRanges     The MSR ranges to search (sorted, non-overlapping).
 * @param   cMsrRanges      The number of MSR ranges.
 * @param   uMsr            What to search for.
 */
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    /* Empty table: insert at the front. */
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        /* Round-up midpoint; biases towards iLast so the loop terminates. */
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;   /* Hit: uMsr is inside range i. */
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;   /* Miss below the search window: insert before i. */
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                /* Miss above the search window: insert after i. */
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}
+
+
+/**
+ * Ensures that there is space for at least @a cNewRanges in the table,
+ * reallocating the table if necessary.
+ *
+ * @returns Pointer to the MSR ranges on success, NULL on failure. On failure
+ * @a *ppaMsrRanges is freed and set to NULL.
+ * @param ppaMsrRanges The variable pointing to the ranges (input/output).
+ * @param cMsrRanges The current number of ranges.
+ * @param cNewRanges The number of ranges to be added.
+ */
+static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
+{
+ uint32_t cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
+ if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
+ {
+ uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
+ void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
+ if (!pvNew)
+ {
+ RTMemFree(*ppaMsrRanges);
+ *ppaMsrRanges = NULL;
+ return NULL;
+ }
+ *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
+ }
+ return *ppaMsrRanges;
+}
+
+
/**
 * Inserts a new MSR range into a sorted MSR range array.
 *
 * If the new MSR range overlaps existing ranges, the existing ones will be
 * adjusted/removed to fit in the new one.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS
 * @retval  VERR_NO_MEMORY
 *
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 *                          On allocation failure this is freed and set to NULL
 *                          by cpumR3MsrRangesEnsureSpace.
 * @param   pcMsrRanges     The variable holding number of ranges.
 * @param   pNewRange       The new range.
 */
int cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    uint32_t cMsrRanges = *pcMsrRanges;
    PCPUMMSRRANGE paMsrRanges = *ppaMsrRanges;

    Assert(pNewRange->uLast >= pNewRange->uFirst);
    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);

    /*
     * Optimize the linear insertion case where we add new entries at the end.
     */
    if (   cMsrRanges > 0
        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
    {
        paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
        if (!paMsrRanges)
            return VERR_NO_MEMORY;
        paMsrRanges[cMsrRanges] = *pNewRange;
        *pcMsrRanges += 1;
    }
    else
    {
        /* i is either the range overlapping pNewRange->uFirst or the
           position where a range starting at uFirst would be inserted. */
        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);

        /*
         * Adding an entirely new entry?
         */
        if (   i >= cMsrRanges
            || pNewRange->uLast < paMsrRanges[i].uFirst)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
        /*
         * Replace existing entry?
         */
        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
                 && pNewRange->uLast  == paMsrRanges[i].uLast)
            paMsrRanges[i] = *pNewRange;
        /*
         * Splitting an existing entry?
         * (pNewRange lies strictly inside range i; range i becomes the head
         * piece, pNewRange goes in the middle, and a copy of i becomes the
         * tail piece.)
         */
        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
                 && pNewRange->uLast  < paMsrRanges[i].uLast)
        {
            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 2);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i + 1] = *pNewRange;
            paMsrRanges[i + 2] = paMsrRanges[i];
            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
            *pcMsrRanges += 2;
        }
        /*
         * Complicated scenarios that can affect more than one range.
         *
         * The current code does not optimize memmove calls when replacing
         * one or more existing ranges, because it's tedious to deal with and
         * not expected to be a frequent usage scenario.
         */
        else
        {
            /* Adjust start of first match? */
            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
                && pNewRange->uLast  <  paMsrRanges[i].uLast)
                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
            else
            {
                /* Adjust end of first match? */
                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
                {
                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
                    i++;
                }
                /* Replace the whole first match (lazy bird). */
                else
                {
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }

                /* Does the new range affect more ranges? */
                while (   i < cMsrRanges
                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
                {
                    if (pNewRange->uLast < paMsrRanges[i].uLast)
                    {
                        /* Adjust the start of it, then we're done. */
                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
                        break;
                    }

                    /* Remove it entirely. */
                    if (i + 1 < cMsrRanges)
                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
                    cMsrRanges = *pcMsrRanges -= 1;
                }
            }

            /* Now, perform a normal insertion. */
            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
            if (!paMsrRanges)
                return VERR_NO_MEMORY;
            if (i < cMsrRanges)
                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
            paMsrRanges[i] = *pNewRange;
            *pcMsrRanges += 1;
        }
    }

    return VINF_SUCCESS;
}
+
+
+/**
+ * Worker for cpumR3MsrApplyFudge that applies one table.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param paRanges Array of MSRs to fudge.
+ * @param cRanges Number of MSRs in the array.
+ */
+static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
+{
+ for (uint32_t i = 0; i < cRanges; i++)
+ if (!cpumLookupMsrRange(pVM, paRanges[i].uFirst))
+ {
+ LogRel(("CPUM: MSR fudge: %#010x %s\n", paRanges[i].uFirst, paRanges[i].szName));
+ int rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
+ &paRanges[i]);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ return VINF_SUCCESS;
+}
+
+
/**
 * Fudges the MSRs that guest are known to access in some odd cases.
 *
 * A typical example is a VM that has been moved between different hosts where
 * for instance the cpu vendor differs.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the cross context VM structure.
 */
int cpumR3MsrApplyFudge(PVM pVM)
{
    /*
     * Basic.
     * MSRs commonly probed by guests regardless of the emulated CPU; only
     * added when not already present in the VM's MSR table (see the worker).
     */
    static CPUMMSRRANGE const s_aFudgeMsrs[] =
    {
        MFO(0x00000000, "IA32_P5_MC_ADDR",          Ia32P5McAddr),
        MFX(0x00000001, "IA32_P5_MC_TYPE",          Ia32P5McType,   Ia32P5McType,   0, 0, UINT64_MAX),
        MVO(0x00000017, "IA32_PLATFORM_ID",         0),
        MFN(0x0000001b, "IA32_APIC_BASE",           Ia32ApicBase,   Ia32ApicBase),
        MVI(0x0000008b, "BIOS_SIGN",                0),
        MFX(0x000000fe, "IA32_MTRRCAP",             Ia32MtrrCap,    ReadOnly,       0x508, 0, 0),
        MFX(0x00000179, "IA32_MCG_CAP",             Ia32McgCap,     ReadOnly,       0x005, 0, 0),
        MFX(0x0000017a, "IA32_MCG_STATUS",          Ia32McgStatus,  Ia32McgStatus,  0, ~(uint64_t)UINT32_MAX, 0),
        MFN(0x000001a0, "IA32_MISC_ENABLE",         Ia32MiscEnable, Ia32MiscEnable),
        MFN(0x000001d9, "IA32_DEBUGCTL",            Ia32DebugCtl,   Ia32DebugCtl),
        MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP",   P6LastBranchFromIp),
        MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP",     P6LastBranchToIp),
        MFO(0x000001dd, "P6_LAST_INT_FROM_IP",      P6LastIntFromIp),
        MFO(0x000001de, "P6_LAST_INT_TO_IP",        P6LastIntToIp),
        MFS(0x00000277, "IA32_PAT",                 Ia32Pat, Ia32Pat, Guest.msrPAT),
        MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE",       Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
        MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };
    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);

    /*
     * XP might mistake opterons and other newer CPUs for P4s.
     * Family >= 0xf covers the P4 family check XP performs, so supply the
     * P4 frequency MSR it will then try to read.
     */
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
        static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
        {
            MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
    }

    return rc;
}
+
+
/**
 * Looks up a CPU database entry by name (or the host CPU) and fills in the
 * CPUID leaves and MSR ranges of @a pInfo accordingly.
 *
 * @returns VBox status code.
 * @retval  VERR_CPUM_DB_CPU_NOT_FOUND if @a pszName matches no DB entry.
 * @param   pszName     CPU profile name, or "host" to base the info on the
 *                      CPU VirtualBox is running on.
 * @param   pInfo       The CPU info structure to fill in (output).
 */
int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
    CPUMDBENTRY const *pEntry = NULL;
    int                rc;

    if (!strcmp(pszName, "host"))
    {
        /*
         * Create a CPU database entry for the host CPU.  This means getting
         * the CPUID bits from the real CPU and grabbing the closest matching
         * database entry for MSRs.
         */
        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
        if (RT_FAILURE(rc))
            return rc;
        rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
        if (RT_FAILURE(rc))
            return rc;

        /* Lookup database entry for MSRs.  Identify the host CPU by vendor,
           family, model, stepping and derived microarchitecture. */
        CPUMCPUVENDOR const enmVendor    = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
                                                                     pInfo->paCpuIdLeavesR3[0].uEbx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEcx,
                                                                     pInfo->paCpuIdLeavesR3[0].uEdx);
        uint32_t     const   uStd1Eax    = pInfo->paCpuIdLeavesR3[1].uEax;
        uint8_t      const   uFamily     = ASMGetCpuFamily(uStd1Eax);
        uint8_t      const   uModel      = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
        uint8_t      const   uStepping   = ASMGetCpuStepping(uStd1Eax);
        CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);

        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
        {
            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
            {
                /* Match against Family, Microarch, model and stepping.  Except
                   for family, always match the closer with preference given to
                   the later/older ones.  The nesting below goes from the most
                   specific match (family+microarch+model+stepping) to the
                   least (family only), keeping the closest candidate found so
                   far in pEntry. */
                if (pCur->uFamily == uFamily)
                {
                    if (pCur->enmMicroarch == enmMicroarch)
                    {
                        if (pCur->uModel == uModel)
                        {
                            if (pCur->uStepping == uStepping)
                            {
                                /* Perfect match. */
                                pEntry = pCur;
                                break;
                            }

                            /* Same model: keep pCur unless pEntry already matches
                               at model level and has a closer stepping. */
                            if (   !pEntry
                                || pEntry->uModel       != uModel
                                || pEntry->enmMicroarch != enmMicroarch
                                || pEntry->uFamily      != uFamily)
                                pEntry = pCur;
                            else if (  pCur->uStepping >= uStepping
                                     ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
                                     : pCur->uStepping > pEntry->uStepping)
                                pEntry = pCur;
                        }
                        else if (   !pEntry
                                 || pEntry->enmMicroarch != enmMicroarch
                                 || pEntry->uFamily      != uFamily)
                            pEntry = pCur;
                        else if (  pCur->uModel >= uModel
                                 ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
                                 : pCur->uModel > pEntry->uModel)
                            pEntry = pCur;
                    }
                    else if (   !pEntry
                             || pEntry->uFamily != uFamily)
                        pEntry = pCur;
                    else if (  pCur->enmMicroarch >= enmMicroarch
                             ? pCur->enmMicroarch < pEntry->enmMicroarch || pEntry->enmMicroarch < enmMicroarch
                             : pCur->enmMicroarch > pEntry->enmMicroarch)
                        pEntry = pCur;
                }
                /* We don't do closeness matching on family, we use the first
                   entry for the CPU vendor instead. (P4 workaround.) */
                else if (!pEntry)
                    pEntry = pCur;
            }
        }

        if (pEntry)
            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
                    pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
        else
        {
            /* No vendor match at all: fall back on the first (representative)
               database entry. */
            pEntry = g_apCpumDbEntries[0];
            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'.\n",
                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
                    pEntry->pszName));
        }
    }
    else
    {
        /*
         * We're supposed to be emulating a specific CPU that is included in
         * our CPU database.  The CPUID tables needs to be copied onto the
         * heap so the caller can modify them and so they can be freed like
         * in the host case above.
         */
        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
            {
                pEntry = g_apCpumDbEntries[i];
                break;
            }
        if (!pEntry)
        {
            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
            return VERR_CPUM_DB_CPU_NOT_FOUND;
        }

        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
        if (pEntry->cCpuIdLeaves)
        {
            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDup(pEntry->paCpuIdLeaves,
                                                              sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves);
            if (!pInfo->paCpuIdLeavesR3)
                return VERR_NO_MEMORY;
        }
        else
            pInfo->paCpuIdLeavesR3 = NULL;

        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
        pInfo->DefCpuId              = pEntry->DefUnknownCpuId;

        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
                pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
    }

    pInfo->fMsrMask             = pEntry->fMsrMask;
    pInfo->iFirstExtCpuIdLeaf   = 0; /* Set by caller. */
    pInfo->uPadding             = 0;
    pInfo->uScalableBusFreq     = pEntry->uScalableBusFreq;
    pInfo->paCpuIdLeavesR0      = NIL_RTR0PTR;
    pInfo->paMsrRangesR0        = NIL_RTR0PTR;
    pInfo->paCpuIdLeavesRC      = NIL_RTRCPTR;
    pInfo->paMsrRangesRC        = NIL_RTRCPTR;

    /*
     * Copy the MSR range.
     * Inserted one by one so overlapping DB entries are merged/split
     * consistently by cpumR3MsrRangesInsert.
     */
    uint32_t        cMsrs   = 0;
    PCPUMMSRRANGE   paMsrs  = NULL;

    PCCPUMMSRRANGE  pCurMsr = pEntry->paMsrRanges;
    uint32_t        cLeft   = pEntry->cMsrRanges;
    while (cLeft-- > 0)
    {
        rc = cpumR3MsrRangesInsert(&paMsrs, &cMsrs, pCurMsr);
        if (RT_FAILURE(rc))
        {
            Assert(!paMsrs); /* The above function frees this. */
            RTMemFree(pInfo->paCpuIdLeavesR3);
            pInfo->paCpuIdLeavesR3 = NULL;
            return rc;
        }
        pCurMsr++;
    }

    pInfo->paMsrRangesR3 = paMsrs;
    pInfo->cMsrRanges    = cMsrs;
    return VINF_SUCCESS;
}
+
+
+/**
+ * Register statistics for the MSRs.
+ *
+ * This must not be called before the MSRs have been finalized and moved to the
+ * hyper heap.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the cross context VM structure.
+ */
+int cpumR3MsrRegStats(PVM pVM)
+{
+ /*
+ * Global statistics.
+ */
+ PCPUM pCpum = &pVM->cpum.s;
+ STAM_REL_REG(pVM, &pCpum->cMsrReads, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Reads",
+ STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
+ STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsRaisingGP",
+ STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
+ STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/ReadsUnknown",
+ STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
+ STAM_REL_REG(pVM, &pCpum->cMsrWrites, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/Writes",
+ STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesRaisingGP",
+ STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
+ STAMUNIT_OCCURENCES, "Writing of ignored bits.");
+ STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesUnknown",
+ STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");
+
+
+# ifdef VBOX_WITH_STATISTICS
+ /*
+ * Per range.
+ */
+ PCPUMMSRRANGE paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
+ uint32_t cRanges = pVM->cpum.s.GuestInfo.cMsrRanges;
+ for (uint32_t i = 0; i < cRanges; i++)
+ {
+ char szName[160];
+ ssize_t cchName;
+
+ if (paRanges[i].uFirst == paRanges[i].uLast)
+ cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
+ paRanges[i].uFirst, paRanges[i].szName);
+ else
+ cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
+ paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
+ STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
+ STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
+ STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");
+
+ RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
+ STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
+ }
+# endif /* VBOX_WITH_STATISTICS */
+
+ return VINF_SUCCESS;
+}
+
+#endif /* !CPUM_DB_STANDALONE */
+
diff --git a/src/VBox/VMM/VMMR3/CSAM.cpp b/src/VBox/VMM/VMMR3/CSAM.cpp
index 5946b9e2..86cce499 100644
--- a/src/VBox/VMM/VMMR3/CSAM.cpp
+++ b/src/VBox/VMM/VMMR3/CSAM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -26,29 +26,33 @@
#include <VBox/vmm/cpumdis.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
-#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/ssm.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include <iprt/asm.h>
#include <iprt/thread.h>
#include "CSAMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
#include <VBox/dbg.h>
+#include <VBox/sup.h>
#include <VBox/err.h>
-#include <VBox/vmm/ssm.h>
#include <VBox/log.h>
-#include <iprt/assert.h>
-#include <iprt/string.h>
+
#include <VBox/dis.h>
#include <VBox/disopcode.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
#include "internal/pgm.h"
@@ -82,13 +86,16 @@ static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC
/** @todo Temporary for debugging. */
static bool fInCSAMCodePageInvalidate = false;
+#ifdef VBOX_WITH_DEBUGGER
+static FNDBGCCMD csamr3CmdOn;
+static FNDBGCCMD csamr3CmdOff;
+#endif
+
+
/*******************************************************************************
* Global Variables *
*******************************************************************************/
#ifdef VBOX_WITH_DEBUGGER
-static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-
/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
@@ -213,10 +220,22 @@ static const SSMFIELD g_aCsamPageRecFields[] =
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) CSAMR3Init(PVM pVM)
+VMMR3_INT_DECL(int) CSAMR3Init(PVM pVM)
{
int rc;
+ /*
+ * We only need a saved state dummy loader if HM is enabled.
+ */
+ if (HMIsEnabled(pVM))
+ {
+ pVM->fCSAMEnabled = false;
+ return SSMR3RegisterStub(pVM, "CSAM", 0);
+ }
+
+ /*
+ * Raw-mode.
+ */
LogFlow(("CSAMR3Init\n"));
/* Allocate bitmap for the page directory. */
@@ -324,6 +343,7 @@ static int csamReinit(PVM pVM)
*/
AssertRelease(!(RT_OFFSETOF(VM, csam.s) & 31));
AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
+ AssertRelease(!HMIsEnabled(pVM));
/*
* Setup any fixed pointers and offsets.
@@ -361,9 +381,9 @@ static int csamReinit(PVM pVM)
* @param pVM The VM.
* @param offDelta Relocation delta.
*/
-VMMR3DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+VMMR3_INT_DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
- if (offDelta)
+ if (offDelta && !HMIsEnabled(pVM))
{
/* Adjust pgdir and page bitmap pointers. */
pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
@@ -389,8 +409,11 @@ VMMR3DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) CSAMR3Term(PVM pVM)
+VMMR3_INT_DECL(int) CSAMR3Term(PVM pVM)
{
+ if (HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
int rc;
rc = CSAMR3Reset(pVM);
@@ -414,10 +437,13 @@ VMMR3DECL(int) CSAMR3Term(PVM pVM)
* @returns VBox status code.
* @param pVM The VM which is reset.
*/
-VMMR3DECL(int) CSAMR3Reset(PVM pVM)
+VMMR3_INT_DECL(int) CSAMR3Reset(PVM pVM)
{
+ if (HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
/* Clear page bitmaps. */
- for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
+ for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
{
if (pVM->csam.s.pPDBitmapHC[i])
{
@@ -427,15 +453,12 @@ VMMR3DECL(int) CSAMR3Reset(PVM pVM)
}
/* Remove all CSAM page records. */
- while(true)
+ for (;;)
{
PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
- if (pPageRec)
- {
- csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
- }
- else
+ if (!pPageRec)
break;
+ csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
}
Assert(!pVM->csam.s.pPageTree);
@@ -540,6 +563,7 @@ static DECLCALLBACK(int) csamr3Save(PVM pVM, PSSMHANDLE pSSM)
return VINF_SUCCESS;
}
+
/**
* Execute state load operation.
*
@@ -566,12 +590,8 @@ static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
/*
* Restore CSAM structure
*/
-#if 0
- rc = SSMR3GetMem(pSSM, &csamInfo, sizeof(csamInfo));
-#else
RT_ZERO(csamInfo);
- rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamFields[0], NULL);
-#endif
+ rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamFields[0], NULL);
AssertRCReturn(rc, rc);
pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
@@ -587,12 +607,8 @@ static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
/* Restore pgdir bitmap (we'll change the pointers next). */
-#if 0
- rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR));
-#else
rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
- SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamPDBitmapArray[0], NULL);
-#endif
+ SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPDBitmapArray[0], NULL);
AssertRCReturn(rc, rc);
/*
@@ -631,12 +647,8 @@ static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
CSAMPAGEREC page;
PCSAMPAGE pPage;
-#if 0
- rc = SSMR3GetMem(pSSM, &page, sizeof(page));
-#else
RT_ZERO(page);
- rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamPageRecFields[0], NULL);
-#endif
+ rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPageRecFields[0], NULL);
AssertRCReturn(rc, rc);
/*
@@ -671,17 +683,18 @@ static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
/**
* Convert guest context address to host context pointer
*
- * @returns VBox status code.
+ * @returns Byte pointer (ring-3 context) corresponding to pGCPtr on success,
+ * NULL on failure.
* @param pVM Pointer to the VM.
* @param pCacheRec Address conversion cache record
* @param pGCPtr Guest context pointer
* @returns Host context pointer or NULL in case of an error
*
*/
-static R3PTRTYPE(void *) CSAMGCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
+static uint8_t *csamR3GCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
{
int rc;
- R3PTRTYPE(void *) pHCPtr;
+ void *pHCPtr;
Assert(pVM->cCpus == 1);
PVMCPU pVCpu = VMMGetCpu0(pVM);
@@ -689,7 +702,7 @@ static R3PTRTYPE(void *) CSAMGCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec
pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
if (pHCPtr)
- return pHCPtr;
+ return (uint8_t *)pHCPtr;
if (pCacheRec->pPageLocStartHC)
{
@@ -716,10 +729,10 @@ static R3PTRTYPE(void *) CSAMGCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec
return NULL;
}
- pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
+ pCacheRec->pPageLocStartHC = (uint8_t*)((uintptr_t)pHCPtr & PAGE_BASE_HC_MASK);
pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
- return pHCPtr;
+ return (uint8_t *)pHCPtr;
}
@@ -841,15 +854,17 @@ static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
}
break;
+ /* removing breaks win2k guests? */
+ case OP_IRET:
+ if (EMIsRawRing1Enabled(pVM))
+ break;
+ /* no break */
+
case OP_ILLUD2:
/* This appears to be some kind of kernel panic in Linux 2.4; no point to continue. */
case OP_RETN:
case OP_INT3:
case OP_INVALID:
-#if 1
- /* removing breaks win2k guests? */
- case OP_IRET:
-#endif
return VINF_SUCCESS;
}
@@ -895,11 +910,10 @@ static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
cbInstrs += cbCurInstr;
{ /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
- uint8_t *pCurInstrHC = 0;
- pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
if (pCurInstrHC == NULL)
{
- Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
+ Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
break;
}
Assert(VALID_PTR(pCurInstrHC));
@@ -914,12 +928,37 @@ static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
break;
}
+#ifdef VBOX_WITH_RAW_RING1
+ case OP_MOV:
+ /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
+ if ( EMIsRawRing1Enabled(pVM)
+ && (pCpu->Param2.fUse & DISUSE_REG_SEG)
+ && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
+ {
+ Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
+ if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
+ {
+ rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
+ if (RT_FAILURE(rc))
+ {
+ Log(("PATMR3InstallPatch failed with %d\n", rc));
+ return VWRN_CONTINUE_ANALYSIS;
+ }
+ }
+ return VWRN_CONTINUE_ANALYSIS;
+ }
+ break;
+#endif
+
case OP_PUSH:
+ /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
break;
/* no break */
+#ifndef VBOX_WITH_SAFE_STR
case OP_STR:
+#endif
case OP_LSL:
case OP_LAR:
case OP_SGDT:
@@ -1077,10 +1116,10 @@ static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCP
*/
for (int j = 0; j < 16; j++)
{
- uint8_t *pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
if (pCurInstrHC == NULL)
{
- Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
+ Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
goto done;
}
Assert(VALID_PTR(pCurInstrHC));
@@ -1289,10 +1328,10 @@ static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTY
}
{ /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
- uint8_t *pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
if (pCurInstrHC == NULL)
{
- Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
+ Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
rc = VERR_PATCHING_REFUSED;
goto done;
}
@@ -1589,6 +1628,7 @@ static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
if (!CSAMIsEnabled(pVM))
return VINF_SUCCESS;
+ Assert(!HMIsEnabled(pVM));
PVMCPU pVCpu = VMMGetCpu0(pVM);
@@ -1689,7 +1729,7 @@ static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
* @param pVM Pointer to the VM.
* @param addr GC address of the page to flush
*/
-VMMR3DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
+VMMR3_INT_DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
{
return csamFlushPage(pVM, addr, true /* remove page record */);
}
@@ -1701,11 +1741,13 @@ VMMR3DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
* @param pVM Pointer to the VM.
* @param addr GC address of the page to flush
*/
-VMMR3DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
+VMMR3_INT_DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
{
PCSAMPAGEREC pPageRec;
int rc;
+ AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
+
addr = addr & PAGE_BASE_GC_MASK;
pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
@@ -1882,7 +1924,8 @@ VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
int rc;
bool fMonitorInvalidation;
Assert(pVM->cCpus == 1);
- PVMCPU pVCpu = VMMGetCpu0(pVM);
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ Assert(!HMIsEnabled(pVM));
/* Dirty pages must be handled before calling this function!. */
Assert(!pVM->csam.s.cDirtyPages);
@@ -2002,6 +2045,8 @@ VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
*/
VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
{
+ Assert(!HMIsEnabled(pVM));
+
pPageAddrGC &= PAGE_BASE_GC_MASK;
Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
@@ -2257,12 +2302,13 @@ static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t cbIn
* @param cbInstr Instruction size
* @param fScanned Mark as scanned or not
*/
-VMMR3DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
+VMMR3_INT_DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
{
PCSAMPAGE pPage = 0;
Assert(!fScanned); /* other case not implemented. */
Assert(!PATMIsPatchGCAddr(pVM, pInstr));
+ Assert(!HMIsEnabled(pVM));
if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
{
@@ -2284,8 +2330,9 @@ VMMR3DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fS
* @param pCtxCore CPU context
* @param pInstrGC Instruction pointer
*/
-VMMR3DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR pInstrGC)
{
+ Assert(!HMIsEnabled(pVM));
if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
{
// No use
@@ -2310,10 +2357,11 @@ VMMR3DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR pInstrG
* @param pVM Pointer to the VM.
* @param pInstrGC Instruction pointer (0:32 virtual address)
*/
-VMMR3DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
{
int rc;
PCSAMPAGE pPage = NULL;
+ Assert(!HMIsEnabled(pVM));
if ( EMIsRawRing0Enabled(pVM) == false
|| PATMIsPatchGCAddr(pVM, pInstrGC) == true)
@@ -2324,7 +2372,7 @@ VMMR3DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
if (CSAMIsEnabled(pVM))
{
- /* Cache record for CSAMGCVirtToHCVirt */
+ /* Cache record for csamR3GCVirtToHCVirt */
CSAMP2GLOOKUPREC cacheRec;
RT_ZERO(cacheRec);
@@ -2429,8 +2477,10 @@ static int csamR3FlushCodePages(PVM pVM)
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
+VMMR3_INT_DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
{
+ AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
+
csamR3FlushDirtyPages(pVM);
csamR3FlushCodePages(pVM);
@@ -2446,8 +2496,9 @@ VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
* @param iGate Start gate
* @param cGates Number of gates to check
*/
-VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
+VMMR3_INT_DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
{
+#ifdef VBOX_WITH_RAW_MODE
Assert(pVM->cCpus == 1);
PVMCPU pVCpu = VMMGetCpu0(pVM);
uint16_t cbIDT;
@@ -2458,6 +2509,7 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
PVBOXIDTE pGuestIdte;
int rc;
+ AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
if (EMIsRawRing0Enabled(pVM) == false)
{
/* Enabling interrupt gates only works when raw ring 0 is enabled. */
@@ -2489,7 +2541,7 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
if (pHandler)
{
PCSAMPAGE pPage = NULL;
- CSAMP2GLOOKUPREC cacheRec; /* Cache record for CSAMGCVirtToHCVirt. */
+ CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
RT_ZERO(cacheRec);
Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
@@ -2546,7 +2598,7 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
RTRCPTR pHandler;
PCSAMPAGE pPage = NULL;
DBGFSELINFO selInfo;
- CSAMP2GLOOKUPREC cacheRec; /* Cache record for CSAMGCVirtToHCVirt. */
+ CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
RT_ZERO(cacheRec);
pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
@@ -2636,7 +2688,8 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
- if (RT_SUCCESS(rc) || rc == VERR_PATM_ALREADY_PATCHED)
+ if ( RT_SUCCESS(rc)
+ || rc == VERR_PATM_ALREADY_PATCHED)
{
Log(("Gate handler 0x%X is SAFE!\n", iGate));
@@ -2651,6 +2704,7 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
}
} /* for */
STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
+#endif /* VBOX_WITH_RAW_MODE */
return VINF_SUCCESS;
}
@@ -2663,6 +2717,7 @@ VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
*/
VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
{
+ Assert(!HMIsEnabled(pVM));
for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
{
if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
@@ -2682,55 +2737,79 @@ VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
/**
* Query CSAM state (enabled/disabled)
*
- * @returns 0 - disabled, 1 - enabled
- * @param pVM Pointer to the VM.
+ * @returns true if enabled, false otherwise.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) CSAMR3IsEnabled(PVM pVM)
+VMMR3DECL(bool) CSAMR3IsEnabled(PUVM pUVM)
{
- return pVM->fCSAMEnabled;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return CSAMIsEnabled(pVM);
}
-#ifdef VBOX_WITH_DEBUGGER
/**
- * The '.csamoff' command.
+ * Enables or disables code scanning.
*
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param fEnabled Whether to enable or disable scanning.
*/
-static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+VMMR3DECL(int) CSAMR3SetScanningEnabled(PUVM pUVM, bool fEnabled)
{
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ if (HMIsEnabled(pVM))
+ {
+ Assert(!pVM->fCSAMEnabled);
+ return VINF_SUCCESS;
+ }
+
+ int rc;
+ if (fEnabled)
+ rc = CSAMEnableScanning(pVM);
+ else
+ rc = CSAMDisableScanning(pVM);
+ return rc;
+}
+
+
+#ifdef VBOX_WITH_DEBUGGER
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.csamoff' command.}
+ */
+static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
NOREF(cArgs); NOREF(paArgs);
- int rc = CSAMDisableScanning(pVM);
+ if (HMR3IsEnabled(pUVM))
+ return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
+
+ int rc = CSAMR3SetScanningEnabled(pUVM, false);
if (RT_FAILURE(rc))
- return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMDisableScanning");
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning disabled\n");
}
/**
- * The '.csamon' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.csamon' command.}
*/
-static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
NOREF(cArgs); NOREF(paArgs);
- int rc = CSAMEnableScanning(pVM);
+ if (HMR3IsEnabled(pUVM))
+ return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
+
+ int rc = CSAMR3SetScanningEnabled(pUVM, true);
if (RT_FAILURE(rc))
- return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMEnableScanning");
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning enabled\n");
}
diff --git a/src/VBox/VMM/VMMR3/DBGF.cpp b/src/VBox/VMM/VMMR3/DBGF.cpp
index 994c5f81..bb4c735d 100644
--- a/src/VBox/VMM/VMMR3/DBGF.cpp
+++ b/src/VBox/VMM/VMMR3/DBGF.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -76,9 +76,10 @@
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/em.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -133,17 +134,19 @@ DECLINLINE(DBGFCMD) dbgfR3SetCmd(PVM pVM, DBGFCMD enmCmd)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) DBGFR3Init(PVM pVM)
+VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
{
- int rc = dbgfR3InfoInit(pVM);
+ PUVM pUVM = pVM->pUVM;
+ AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
+ AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
+
+ int rc = dbgfR3InfoInit(pUVM);
if (RT_SUCCESS(rc))
rc = dbgfR3TraceInit(pVM);
if (RT_SUCCESS(rc))
- rc = dbgfR3RegInit(pVM);
- if (RT_SUCCESS(rc))
- rc = dbgfR3AsInit(pVM);
+ rc = dbgfR3RegInit(pUVM);
if (RT_SUCCESS(rc))
- rc = dbgfR3SymInit(pVM);
+ rc = dbgfR3AsInit(pUVM);
if (RT_SUCCESS(rc))
rc = dbgfR3BpInit(pVM);
return rc;
@@ -156,9 +159,27 @@ VMMR3DECL(int) DBGFR3Init(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) DBGFR3Term(PVM pVM)
+VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
+{
+ PUVM pUVM = pVM->pUVM;
+
+ dbgfR3OSTerm(pUVM);
+ dbgfR3AsTerm(pUVM);
+ dbgfR3RegTerm(pUVM);
+ dbgfR3TraceTerm(pVM);
+ dbgfR3InfoTerm(pUVM);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when the VM is powered off to detach debuggers.
+ *
+ * @param pVM The VM handle.
+ */
+VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
{
- int rc;
/*
* Send a termination event to any attached debugger.
@@ -168,55 +189,84 @@ VMMR3DECL(int) DBGFR3Term(PVM pVM)
&& RTSemPingShouldWait(&pVM->dbgf.s.PingPong))
RTSemPingWait(&pVM->dbgf.s.PingPong, 5000);
- /* now, send the event if we're the speaker. */
- if ( pVM->dbgf.s.fAttached
- && RTSemPingIsSpeaker(&pVM->dbgf.s.PingPong))
+ if (pVM->dbgf.s.fAttached)
{
- DBGFCMD enmCmd = dbgfR3SetCmd(pVM, DBGFCMD_NO_COMMAND);
- if (enmCmd == DBGFCMD_DETACH_DEBUGGER)
- /* the debugger beat us to initiating the detaching. */
- rc = VINF_SUCCESS;
- else
+ /* Just mark it as detached if we're not in a position to send a power
+ off event. It should fail later on. */
+ if (!RTSemPingIsSpeaker(&pVM->dbgf.s.PingPong))
{
- /* ignore the command (if any). */
- enmCmd = DBGFCMD_NO_COMMAND;
- pVM->dbgf.s.DbgEvent.enmType = DBGFEVENT_TERMINATING;
- pVM->dbgf.s.DbgEvent.enmCtx = DBGFEVENTCTX_OTHER;
- rc = RTSemPing(&pVM->dbgf.s.PingPong);
+ ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
+ if (RTSemPingIsSpeaker(&pVM->dbgf.s.PingPong))
+ ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
}
- /*
- * Process commands until we get a detached command.
- */
- while (RT_SUCCESS(rc) && enmCmd != DBGFCMD_DETACHED_DEBUGGER)
+ if (RTSemPingIsSpeaker(&pVM->dbgf.s.PingPong))
{
- if (enmCmd != DBGFCMD_NO_COMMAND)
+ /* Try send the power off event. */
+ int rc;
+ DBGFCMD enmCmd = dbgfR3SetCmd(pVM, DBGFCMD_NO_COMMAND);
+ if (enmCmd == DBGFCMD_DETACH_DEBUGGER)
+ /* the debugger beat us to initiating the detaching. */
+ rc = VINF_SUCCESS;
+ else
{
- /* process command */
- bool fResumeExecution;
- DBGFCMDDATA CmdData = pVM->dbgf.s.VMMCmdData;
- rc = dbgfR3VMMCmd(pVM, enmCmd, &CmdData, &fResumeExecution);
+ /* ignore the command (if any). */
enmCmd = DBGFCMD_NO_COMMAND;
+ pVM->dbgf.s.DbgEvent.enmType = DBGFEVENT_POWERING_OFF;
+ pVM->dbgf.s.DbgEvent.enmCtx = DBGFEVENTCTX_OTHER;
+ rc = RTSemPing(&pVM->dbgf.s.PingPong);
}
- else
+
+ /*
+ * Process commands and priority requests until we get a command
+ * indicating that the debugger has detached.
+ */
+ uint32_t cPollHack = 1;
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+ while (RT_SUCCESS(rc))
{
- /* wait for new command. */
- rc = RTSemPingWait(&pVM->dbgf.s.PingPong, RT_INDEFINITE_WAIT);
- if (RT_SUCCESS(rc))
- enmCmd = dbgfR3SetCmd(pVM, DBGFCMD_NO_COMMAND);
+ if (enmCmd != DBGFCMD_NO_COMMAND)
+ {
+ /* process command */
+ bool fResumeExecution;
+ DBGFCMDDATA CmdData = pVM->dbgf.s.VMMCmdData;
+ rc = dbgfR3VMMCmd(pVM, enmCmd, &CmdData, &fResumeExecution);
+ if (enmCmd == DBGFCMD_DETACHED_DEBUGGER)
+ break;
+ enmCmd = DBGFCMD_NO_COMMAND;
+ }
+ else
+ {
+ /* Wait for new command, processing pending priority requests
+ first. The request processing is a bit crazy, but
+ unfortunately required by plugin unloading. */
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+ {
+ LogFlow(("DBGFR3PowerOff: Processes priority requests...\n"));
+ rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
+ if (rc == VINF_SUCCESS)
+ rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
+ LogFlow(("DBGFR3PowerOff: VMR3ReqProcess -> %Rrc\n", rc));
+ cPollHack = 1;
+ }
+ else if (cPollHack < 120)
+ cPollHack++;
+
+ rc = RTSemPingWait(&pVM->dbgf.s.PingPong, cPollHack);
+ if (RT_SUCCESS(rc))
+ enmCmd = dbgfR3SetCmd(pVM, DBGFCMD_NO_COMMAND);
+ else if (rc == VERR_TIMEOUT)
+ rc = VINF_SUCCESS;
+ }
}
+
+ /*
+ * Clear the FF so we won't get confused later on.
+ */
+ VM_FF_CLEAR(pVM, VM_FF_DBGF);
}
}
-
- /*
- * Terminate the other bits.
- */
- dbgfR3OSTerm(pVM);
- dbgfR3AsTerm(pVM);
- dbgfR3RegTerm(pVM);
- dbgfR3TraceTerm(pVM);
- dbgfR3InfoTerm(pVM);
- return VINF_SUCCESS;
}
@@ -228,10 +278,10 @@ VMMR3DECL(int) DBGFR3Term(PVM pVM)
* @param pVM Pointer to the VM.
* @param offDelta Relocation delta relative to old location.
*/
-VMMR3DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
dbgfR3TraceRelocate(pVM);
- dbgfR3AsRelocate(pVM, offDelta);
+ dbgfR3AsRelocate(pVM->pUVM, offDelta);
}
@@ -252,7 +302,7 @@ bool dbgfR3WaitForAttach(PVM pVM, DBGFEVENTTYPE enmEvent)
# if !defined(DEBUG) || defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(IEM_VERIFICATION_MODE)
int cWait = 10;
# else
- int cWait = HWACCMIsEnabled(pVM)
+ int cWait = HMIsEnabled(pVM)
&& ( enmEvent == DBGFEVENT_ASSERTION_HYPER
|| enmEvent == DBGFEVENT_FATAL_ERROR)
&& !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
@@ -298,25 +348,19 @@ bool dbgfR3WaitForAttach(PVM pVM, DBGFEVENTTYPE enmEvent)
* @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) DBGFR3VMMForcedAction(PVM pVM)
+VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM)
{
int rc = VINF_SUCCESS;
- if (VM_FF_TESTANDCLEAR(pVM, VM_FF_DBGF))
+ if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_DBGF))
{
PVMCPU pVCpu = VMMGetCpu(pVM);
/*
- * Commands?
+ * Command pending? Process it.
*/
if (pVM->dbgf.s.enmVMMCmd != DBGFCMD_NO_COMMAND)
{
- /** @todo stupid GDT/LDT sync hack. go away! */
- SELMR3UpdateFromCPUM(pVM, pVCpu);
-
- /*
- * Process the command.
- */
bool fResumeExecution;
DBGFCMDDATA CmdData = pVM->dbgf.s.VMMCmdData;
DBGFCMD enmCmd = dbgfR3SetCmd(pVM, DBGFCMD_NO_COMMAND);
@@ -448,6 +492,7 @@ static int dbgfR3SendEvent(PVM pVM)
* @returns VBox status.
* @param pVM Pointer to the VM.
* @param enmEvent The event to send.
+ * @internal
*/
VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
{
@@ -475,6 +520,7 @@ VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
* @param pszFunction Function name.
* @param pszFormat Message which accompanies the event.
* @param ... Message arguments.
+ * @internal
*/
VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
{
@@ -497,6 +543,7 @@ VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFi
* @param pszFunction Function name.
* @param pszFormat Message which accompanies the event.
* @param args Message arguments.
+ * @internal
*/
VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
{
@@ -537,7 +584,7 @@ VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszF
* @param pszMsg1 First assertion message.
* @param pszMsg2 Second assertion message.
*/
-VMMR3DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
+VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
{
int rc = dbgfR3EventPrologue(pVM, enmEvent);
if (RT_FAILURE(rc))
@@ -562,7 +609,7 @@ VMMR3DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char
* @param pVM Pointer to the VM.
* @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
*/
-VMMR3DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
+VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
{
int rc = dbgfR3EventPrologue(pVM, enmEvent);
if (RT_FAILURE(rc))
@@ -614,9 +661,6 @@ static int dbgfR3VMMWait(PVM pVM)
PVMCPU pVCpu = VMMGetCpu(pVM);
LogFlow(("dbgfR3VMMWait:\n"));
-
- /** @todo stupid GDT/LDT sync hack. go away! */
- SELMR3UpdateFromCPUM(pVM, pVCpu);
int rcRet = VINF_SUCCESS;
/*
@@ -631,8 +675,8 @@ static int dbgfR3VMMWait(PVM pVM)
for (;;)
{
int rc;
- if ( !VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST)
- && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
+ if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST)
+ && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
{
rc = RTSemPingWait(&pVM->dbgf.s.PingPong, cPollHack);
if (RT_SUCCESS(rc))
@@ -644,13 +688,13 @@ static int dbgfR3VMMWait(PVM pVM)
}
}
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
cPollHack = 1;
}
- else if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
+ else if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
{
LogFlow(("dbgfR3VMMWait: Processes requests...\n"));
rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
@@ -766,6 +810,9 @@ static int dbgfR3VMMCmd(PVM pVM, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pf
*/
case DBGFCMD_GO:
{
+ /** @todo SMP */
+ PVMCPU pVCpu = VMMGetCpu0(pVM);
+ pVCpu->dbgf.s.fSingleSteppingRaw = false;
fSendEvent = false;
fResume = true;
break;
@@ -855,10 +902,12 @@ static int dbgfR3VMMCmd(PVM pVM, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pf
* Only one debugger at a time.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) DBGFR3Attach(PVM pVM)
+VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
/*
@@ -903,17 +952,27 @@ static DECLCALLBACK(int) dbgfR3Attach(PVM pVM)
* Caller must be attached to the VM.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) DBGFR3Detach(PVM pVM)
+VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
{
LogFlow(("DBGFR3Detach:\n"));
int rc;
/*
+ * Validate input. The UVM handle shall be valid, the VM handle might be
+ * in the processes of being destroyed already, so deal quietly with that.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ if (!VM_IS_VALID_EXT(pVM))
+ return VERR_INVALID_VM_HANDLE;
+
+ /*
* Check if attached.
*/
- AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
+ if (!pVM->dbgf.s.fAttached)
+ return VERR_DBGF_NOT_ATTACHED;
/*
* Try send the detach command.
@@ -949,15 +1008,18 @@ VMMR3DECL(int) DBGFR3Detach(PVM pVM)
* Wait for a debug event.
*
* @returns VBox status. Will not return VBOX_INTERRUPTED.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param cMillies Number of millis to wait.
* @param ppEvent Where to store the event pointer.
*/
-VMMR3DECL(int) DBGFR3EventWait(PVM pVM, RTMSINTERVAL cMillies, PCDBGFEVENT *ppEvent)
+VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PCDBGFEVENT *ppEvent)
{
/*
* Check state.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
*ppEvent = NULL;
@@ -983,13 +1045,16 @@ VMMR3DECL(int) DBGFR3EventWait(PVM pVM, RTMSINTERVAL cMillies, PCDBGFEVENT *ppEv
* arrives. Until that time it's not possible to issue any new commands.
*
* @returns VBox status.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) DBGFR3Halt(PVM pVM)
+VMMR3DECL(int) DBGFR3Halt(PUVM pUVM)
{
/*
* Check state.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
RTPINGPONGSPEAKER enmSpeaker = pVM->dbgf.s.PingPong.enmSpeaker;
if ( enmSpeaker == RTPINGPONGSPEAKER_PONG
@@ -1010,11 +1075,15 @@ VMMR3DECL(int) DBGFR3Halt(PVM pVM)
*
* @returns True if halted.
* @returns False if not halted.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(bool) DBGFR3IsHalted(PVM pVM)
+VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
AssertReturn(pVM->dbgf.s.fAttached, false);
+
RTPINGPONGSPEAKER enmSpeaker = pVM->dbgf.s.PingPong.enmSpeaker;
return enmSpeaker == RTPINGPONGSPEAKER_PONG_SIGNALED
|| enmSpeaker == RTPINGPONGSPEAKER_PONG;
@@ -1026,14 +1095,32 @@ VMMR3DECL(bool) DBGFR3IsHalted(PVM pVM)
*
* This function is only used by lazy, multiplexing debuggers. :-)
*
- * @returns True if waitable.
- * @returns False if not waitable.
- * @param pVM Pointer to the VM.
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if waitable.
+ * @retval VERR_SEM_OUT_OF_TURN if not waitable.
+ * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
+ * (not asserted) or if the handle is invalid (asserted).
+ * @retval VERR_DBGF_NOT_ATTACHED if not attached.
+ *
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(bool) DBGFR3CanWait(PVM pVM)
+VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
{
- AssertReturn(pVM->dbgf.s.fAttached, false);
- return RTSemPongShouldWait(&pVM->dbgf.s.PingPong);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* Note! There is a slight race here, unfortunately. */
+ PVM pVM = pUVM->pVM;
+ if (!RT_VALID_PTR(pVM))
+ return VERR_INVALID_VM_HANDLE;
+ if (pVM->enmVMState >= VMSTATE_DESTROYING)
+ return VERR_INVALID_VM_HANDLE;
+ if (!pVM->dbgf.s.fAttached)
+ return VERR_DBGF_NOT_ATTACHED;
+
+ if (!RTSemPongShouldWait(&pVM->dbgf.s.PingPong))
+ return VERR_SEM_OUT_OF_TURN;
+
+ return VINF_SUCCESS;
}
@@ -1043,13 +1130,16 @@ VMMR3DECL(bool) DBGFR3CanWait(PVM pVM)
* There is no receipt event on this command.
*
* @returns VBox status.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) DBGFR3Resume(PVM pVM)
+VMMR3DECL(int) DBGFR3Resume(PUVM pUVM)
{
/*
* Check state.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
AssertReturn(RTSemPongIsSpeaker(&pVM->dbgf.s.PingPong), VERR_SEM_OUT_OF_TURN);
@@ -1071,14 +1161,17 @@ VMMR3DECL(int) DBGFR3Resume(PVM pVM)
* The current implementation is not reliable, so don't rely on the event coming.
*
* @returns VBox status.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU to single step on.
*/
-VMMR3DECL(int) DBGFR3Step(PVM pVM, VMCPUID idCpu)
+VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
{
/*
* Check state.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
AssertReturn(RTSemPongIsSpeaker(&pVM->dbgf.s.PingPong), VERR_SEM_OUT_OF_TURN);
AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
@@ -1106,8 +1199,9 @@ VMMR3DECL(int) DBGFR3Step(PVM pVM, VMCPUID idCpu)
* @param pVCpu Pointer to the VMCPU.
*
* @thread VCpu EMT
+ * @internal
*/
-VMMR3DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
+VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
@@ -1115,3 +1209,25 @@ VMMR3DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
return VINF_EM_DBG_STEP;
}
+
+/**
+ * Inject an NMI into a running VM (only VCPU 0!)
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the CPU to inject the NMI on.
+VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+
+ /** @todo Implement generic NMI injection. */
+ if (!HMIsEnabled(pVM))
+ return VERR_NOT_SUP_IN_RAW_MODE;
+
+ VMCPU_FF_SET(&pVM->aCpus[idCpu], VMCPU_FF_INTERRUPT_NMI);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFAddr.cpp b/src/VBox/VMM/VMMR3/DBGFAddr.cpp
index 89cd0ca0..e2851189 100644
--- a/src/VBox/VMM/VMMR3/DBGFAddr.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFAddr.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -24,8 +24,11 @@
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -35,14 +38,17 @@
/**
* Checks if an address is in the HMA or not.
- * @returns true if it's inside the HMA.
- * @returns flase if it's not inside the HMA.
- * @param pVM Pointer to the VM.
- * @param FlatPtr The address in question.
+ *
+ * @retval true if it's inside the HMA.
+ * @retval false if it's not inside the HMA.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param FlatPtr The address in question.
*/
-DECLINLINE(bool) dbgfR3IsHMA(PVM pVM, RTGCUINTPTR FlatPtr)
+DECLINLINE(bool) dbgfR3IsHMA(PUVM pUVM, RTGCUINTPTR FlatPtr)
{
- return MMHyperIsInsideArea(pVM, FlatPtr);
+ return !HMIsEnabled(pUVM->pVM)
+ && MMHyperIsInsideArea(pUVM->pVM, FlatPtr);
}
@@ -91,36 +97,43 @@ static int dbgfR3AddrFromSelInfoOffWorker(PDBGFADDRESS pAddress, PCDBGFSELINFO p
* Creates a mixed address from a Sel:off pair.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param idCpu The CPU ID.
- * @param pAddress Where to store the mixed address.
- * @param Sel The selector part.
- * @param off The offset part.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU ID.
+ * @param pAddress Where to store the mixed address.
+ * @param Sel The selector part.
+ * @param off The offset part.
*/
-VMMR3DECL(int) DBGFR3AddrFromSelOff(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, RTSEL Sel, RTUINTPTR off)
+VMMR3DECL(int) DBGFR3AddrFromSelOff(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, RTSEL Sel, RTUINTPTR off)
{
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
pAddress->Sel = Sel;
pAddress->off = off;
if (Sel != DBGF_SEL_FLAT)
{
DBGFSELINFO SelInfo;
- int rc = DBGFR3SelQueryInfo(pVM, idCpu, Sel, DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
+ int rc = DBGFR3SelQueryInfo(pUVM, idCpu, Sel, DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
+ if (RT_FAILURE(rc) && !HMIsEnabled(pUVM->pVM))
+ rc = DBGFR3SelQueryInfo(pUVM, idCpu, Sel, DBGFSELQI_FLAGS_DT_SHADOW, &SelInfo);
if (RT_FAILURE(rc))
return rc;
rc = dbgfR3AddrFromSelInfoOffWorker(pAddress, &SelInfo, off);
if (RT_FAILURE(rc))
return rc;
+ if ( (SelInfo.fFlags & DBGFSELINFO_FLAGS_HYPER)
+ || dbgfR3IsHMA(pUVM, pAddress->FlatPtr))
+ pAddress->fFlags |= DBGFADDRESS_FLAGS_HMA;
}
else
{
pAddress->FlatPtr = off;
pAddress->fFlags = DBGFADDRESS_FLAGS_FLAT;
+ if (dbgfR3IsHMA(pUVM, pAddress->FlatPtr))
+ pAddress->fFlags |= DBGFADDRESS_FLAGS_HMA;
}
pAddress->fFlags |= DBGFADDRESS_FLAGS_VALID;
- if (dbgfR3IsHMA(pVM, pAddress->FlatPtr))
- pAddress->fFlags |= DBGFADDRESS_FLAGS_HMA;
return VINF_SUCCESS;
}
@@ -131,22 +144,27 @@ VMMR3DECL(int) DBGFR3AddrFromSelOff(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddres
* described by it.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param idCpu The CPU ID.
- * @param pAddress Where to store the mixed address.
- * @param pSelInfo The selector info.
- * @param off The offset part.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The CPU ID.
+ * @param pAddress Where to store the mixed address.
+ * @param pSelInfo The selector info.
+ * @param off The offset part.
*/
-VMMR3DECL(int) DBGFR3AddrFromSelInfoOff(PVM pVM, PDBGFADDRESS pAddress, PCDBGFSELINFO pSelInfo, RTUINTPTR off)
+VMMR3DECL(int) DBGFR3AddrFromSelInfoOff(PUVM pUVM, PDBGFADDRESS pAddress, PCDBGFSELINFO pSelInfo, RTUINTPTR off)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
pAddress->Sel = pSelInfo->Sel;
pAddress->off = off;
int rc = dbgfR3AddrFromSelInfoOffWorker(pAddress, pSelInfo, off);
if (RT_FAILURE(rc))
return rc;
+
pAddress->fFlags |= DBGFADDRESS_FLAGS_VALID;
- if (dbgfR3IsHMA(pVM, pAddress->FlatPtr))
+ if (dbgfR3IsHMA(pUVM, pAddress->FlatPtr))
pAddress->fFlags |= DBGFADDRESS_FLAGS_HMA;
+
return VINF_SUCCESS;
}
@@ -155,17 +173,19 @@ VMMR3DECL(int) DBGFR3AddrFromSelInfoOff(PVM pVM, PDBGFADDRESS pAddress, PCDBGFSE
* Creates a mixed address from a flat address.
*
* @returns pAddress.
- * @param pVM Pointer to the VM.
- * @param pAddress Where to store the mixed address.
- * @param FlatPtr The flat pointer.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to store the mixed address.
+ * @param FlatPtr The flat pointer.
*/
-VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromFlat(PVM pVM, PDBGFADDRESS pAddress, RTGCUINTPTR FlatPtr)
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromFlat(PUVM pUVM, PDBGFADDRESS pAddress, RTGCUINTPTR FlatPtr)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
pAddress->Sel = DBGF_SEL_FLAT;
pAddress->off = FlatPtr;
pAddress->FlatPtr = FlatPtr;
pAddress->fFlags = DBGFADDRESS_FLAGS_FLAT | DBGFADDRESS_FLAGS_VALID;
- if (dbgfR3IsHMA(pVM, pAddress->FlatPtr))
+ if (dbgfR3IsHMA(pUVM, pAddress->FlatPtr))
pAddress->fFlags |= DBGFADDRESS_FLAGS_HMA;
return pAddress;
}
@@ -175,13 +195,13 @@ VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromFlat(PVM pVM, PDBGFADDRESS pAddress, RTGCU
* Creates a mixed address from a guest physical address.
*
* @returns pAddress.
- * @param pVM Pointer to the VM.
- * @param pAddress Where to store the mixed address.
- * @param PhysAddr The guest physical address.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to store the mixed address.
+ * @param PhysAddr The guest physical address.
*/
-VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromPhys(PVM pVM, PDBGFADDRESS pAddress, RTGCPHYS PhysAddr)
+VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromPhys(PUVM pUVM, PDBGFADDRESS pAddress, RTGCPHYS PhysAddr)
{
- NOREF(pVM);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
pAddress->Sel = DBGF_SEL_FLAT;
pAddress->off = PhysAddr;
pAddress->FlatPtr = PhysAddr;
@@ -195,12 +215,12 @@ VMMR3DECL(PDBGFADDRESS) DBGFR3AddrFromPhys(PVM pVM, PDBGFADDRESS pAddress, RTGCP
*
* @returns true if valid.
* @returns false if invalid.
- * @param pVM Pointer to the VM.
- * @param pAddress The address to validate.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address to validate.
*/
-VMMR3DECL(bool) DBGFR3AddrIsValid(PVM pVM, PCDBGFADDRESS pAddress)
+VMMR3DECL(bool) DBGFR3AddrIsValid(PUVM pUVM, PCDBGFADDRESS pAddress)
{
- NOREF(pVM);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
if (!VALID_PTR(pAddress))
return false;
if (!DBGFADDRESS_IS_VALID(pAddress))
@@ -240,13 +260,13 @@ static DECLCALLBACK(int) dbgfR3AddrToPhysOnVCpu(PVMCPU pVCpu, PDBGFADDRESS pAddr
* @retval VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
* @retval VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context to convert virtual
* addresses.
* @param pAddress The address.
* @param pGCPhys Where to return the physical address.
*/
-VMMR3DECL(int) DBGFR3AddrToPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
+VMMR3DECL(int) DBGFR3AddrToPhys(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
{
/*
* Parameter validation.
@@ -255,8 +275,10 @@ VMMR3DECL(int) DBGFR3AddrToPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress,
*pGCPhys = NIL_RTGCPHYS;
AssertPtr(pAddress);
AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
/*
* Convert by address type.
@@ -275,8 +297,8 @@ VMMR3DECL(int) DBGFR3AddrToPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress,
if (VMCPU_IS_EMT(pVCpu))
rc = dbgfR3AddrToPhysOnVCpu(pVCpu, pAddress, pGCPhys);
else
- rc = VMR3ReqPriorityCallWait(pVCpu->pVMR3, pVCpu->idCpu,
- (PFNRT)dbgfR3AddrToPhysOnVCpu, 3, pVCpu, pAddress, pGCPhys);
+ rc = VMR3ReqPriorityCallWaitU(pUVM, pVCpu->idCpu,
+ (PFNRT)dbgfR3AddrToPhysOnVCpu, 3, pVCpu, pAddress, pGCPhys);
}
return rc;
}
@@ -298,13 +320,13 @@ VMMR3DECL(int) DBGFR3AddrToPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress,
* @retval VERR_PGM_PHYS_PAGE_RESERVED
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context to convert virtual
* addresses.
* @param pAddress The address.
* @param pHCPhys Where to return the physical address.
*/
-VMMR3DECL(int) DBGFR3AddrToHostPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTHCPHYS pHCPhys)
+VMMR3DECL(int) DBGFR3AddrToHostPhys(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, PRTHCPHYS pHCPhys)
{
/*
* Parameter validation.
@@ -313,8 +335,10 @@ VMMR3DECL(int) DBGFR3AddrToHostPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddre
*pHCPhys = NIL_RTHCPHYS;
AssertPtr(pAddress);
AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
/*
* Convert it if we can.
@@ -325,7 +349,7 @@ VMMR3DECL(int) DBGFR3AddrToHostPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddre
else
{
RTGCPHYS GCPhys;
- rc = DBGFR3AddrToPhys(pVM, idCpu, pAddress, &GCPhys);
+ rc = DBGFR3AddrToPhys(pUVM, idCpu, pAddress, &GCPhys);
if (RT_SUCCESS(rc))
rc = PGMPhysGCPhys2HCPhys(pVM, pAddress->FlatPtr, pHCPhys);
}
@@ -338,14 +362,17 @@ VMMR3DECL(int) DBGFR3AddrToHostPhys(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddre
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context.
* @param pAddress The address.
* @param fReadOnly Whether returning a read-only page is fine or not.
* @param ppvR3Ptr Where to return the address.
*/
-static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly, void **ppvR3Ptr)
+static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly,
+ void **ppvR3Ptr)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
Assert(idCpu == VMMGetCpuId(pVM));
int rc;
@@ -353,7 +380,8 @@ static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PVM pVM, VMCPUID idCpu,
{
rc = VERR_NOT_SUPPORTED; /** @todo create some dedicated errors for this stuff. */
/** @todo this may assert, create a debug version of this which doesn't. */
- if (MMHyperIsInsideArea(pVM, pAddress->FlatPtr))
+ if ( !HMIsEnabled(pVM)
+ && MMHyperIsInsideArea(pVM, pAddress->FlatPtr))
{
void *pv = MMHyperRCToCC(pVM, (RTRCPTR)pAddress->FlatPtr);
if (pv)
@@ -409,7 +437,7 @@ static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PVM pVM, VMCPUID idCpu,
* @retval VERR_PGM_PHYS_PAGE_RESERVED
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context to convert virtual
* addresses.
* @param pAddress The address.
@@ -418,7 +446,7 @@ static DECLCALLBACK(int) dbgfR3AddrToVolatileR3PtrOnVCpu(PVM pVM, VMCPUID idCpu,
* before we return.
* @param ppvR3Ptr Where to return the address.
*/
-VMMR3DECL(int) DBGFR3AddrToVolatileR3Ptr(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly, void **ppvR3Ptr)
+VMMR3DECL(int) DBGFR3AddrToVolatileR3Ptr(PUVM pUVM, VMCPUID idCpu, PDBGFADDRESS pAddress, bool fReadOnly, void **ppvR3Ptr)
{
/*
* Parameter validation.
@@ -427,13 +455,14 @@ VMMR3DECL(int) DBGFR3AddrToVolatileR3Ptr(PVM pVM, VMCPUID idCpu, PDBGFADDRESS p
*ppvR3Ptr = NULL;
AssertPtr(pAddress);
AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_STATE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_PARAMETER);
/*
* Convert it.
*/
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3AddrToVolatileR3PtrOnVCpu, 5, pVM, idCpu, pAddress, fReadOnly, ppvR3Ptr);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3AddrToVolatileR3PtrOnVCpu, 5,
+ pUVM, idCpu, pAddress, fReadOnly, ppvR3Ptr);
}
diff --git a/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp b/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
index 3861be8f..dbc2c00f 100644
--- a/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2008 Oracle Corporation
+ * Copyright (C) 2008-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -38,9 +38,14 @@
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_DBGF
#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/mm.h>
+#ifdef VBOX_WITH_RAW_MODE
+# include <VBox/vmm/patm.h>
+#endif
#include "DBGFInternal.h"
+#include <VBox/vmm/uvm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -49,6 +54,7 @@
#include <iprt/assert.h>
#include <iprt/ctype.h>
#include <iprt/env.h>
+#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/param.h>
@@ -78,10 +84,10 @@ typedef DBGFASDBNODE *PDBGFASDBNODE;
*/
typedef struct DBGFR3ASLOADOPENDATA
{
- const char *pszModName;
- RTGCUINTPTR uSubtrahend;
- uint32_t fFlags;
- RTDBGMOD hMod;
+ const char *pszModName;
+ RTGCUINTPTR uSubtrahend;
+ uint32_t fFlags;
+ RTDBGMOD hMod;
} DBGFR3ASLOADOPENDATA;
/**
@@ -100,30 +106,30 @@ typedef FNDBGFR3ASSEARCHOPEN *PFNDBGFR3ASSEARCHOPEN;
* Defined Constants And Macros *
*******************************************************************************/
/** Locks the address space database for writing. */
-#define DBGF_AS_DB_LOCK_WRITE(pVM) \
+#define DBGF_AS_DB_LOCK_WRITE(pUVM) \
do { \
- int rcSem = RTSemRWRequestWrite((pVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
+ int rcSem = RTSemRWRequestWrite((pUVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
AssertRC(rcSem); \
} while (0)
/** Unlocks the address space database after writing. */
-#define DBGF_AS_DB_UNLOCK_WRITE(pVM) \
+#define DBGF_AS_DB_UNLOCK_WRITE(pUVM) \
do { \
- int rcSem = RTSemRWReleaseWrite((pVM)->dbgf.s.hAsDbLock); \
+ int rcSem = RTSemRWReleaseWrite((pUVM)->dbgf.s.hAsDbLock); \
AssertRC(rcSem); \
} while (0)
/** Locks the address space database for reading. */
-#define DBGF_AS_DB_LOCK_READ(pVM) \
+#define DBGF_AS_DB_LOCK_READ(pUVM) \
do { \
- int rcSem = RTSemRWRequestRead((pVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
+ int rcSem = RTSemRWRequestRead((pUVM)->dbgf.s.hAsDbLock, RT_INDEFINITE_WAIT); \
AssertRC(rcSem); \
} while (0)
/** Unlocks the address space database after reading. */
-#define DBGF_AS_DB_UNLOCK_READ(pVM) \
+#define DBGF_AS_DB_UNLOCK_READ(pUVM) \
do { \
- int rcSem = RTSemRWReleaseRead((pVM)->dbgf.s.hAsDbLock); \
+ int rcSem = RTSemRWReleaseRead((pUVM)->dbgf.s.hAsDbLock); \
AssertRC(rcSem); \
} while (0)
@@ -133,14 +139,86 @@ typedef FNDBGFR3ASSEARCHOPEN *PFNDBGFR3ASSEARCHOPEN;
* Initializes the address space parts of DBGF.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-int dbgfR3AsInit(PVM pVM)
+int dbgfR3AsInit(PUVM pUVM)
{
+ Assert(pUVM->pVM);
+
/*
* Create the semaphore.
*/
- int rc = RTSemRWCreate(&pVM->dbgf.s.hAsDbLock);
+ int rc = RTSemRWCreate(&pUVM->dbgf.s.hAsDbLock);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create the debugging config instance and set it up, defaulting to
+ * deferred loading in order to keep things fast.
+ */
+ rc = RTDbgCfgCreate(&pUVM->dbgf.s.hDbgCfg, NULL, true /*fNativePaths*/);
+ AssertRCReturn(rc, rc);
+ rc = RTDbgCfgChangeUInt(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_FLAGS, RTDBGCFGOP_PREPEND,
+ RTDBGCFG_FLAGS_DEFERRED);
+ AssertRCReturn(rc, rc);
+
+ static struct
+ {
+ RTDBGCFGPROP enmProp;
+ const char *pszEnvName;
+ const char *pszCfgName;
+ } const s_aProps[] =
+ {
+ { RTDBGCFGPROP_FLAGS, "VBOXDBG_FLAGS", "Flags" },
+ { RTDBGCFGPROP_PATH, "VBOXDBG_PATH", "Path" },
+ { RTDBGCFGPROP_SUFFIXES, "VBOXDBG_SUFFIXES", "Suffixes" },
+ { RTDBGCFGPROP_SRC_PATH, "VBOXDBG_SRC_PATH", "SrcPath" },
+ };
+ PCFGMNODE pCfgDbgf = CFGMR3GetChild(CFGMR3GetRootU(pUVM), "/DBGF");
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aProps); i++)
+ {
+ char szEnvValue[8192];
+ rc = RTEnvGetEx(RTENV_DEFAULT, s_aProps[i].pszEnvName, szEnvValue, sizeof(szEnvValue), NULL);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, s_aProps[i].enmProp, RTDBGCFGOP_PREPEND, szEnvValue);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: %s=%s -> %Rrc", s_aProps[i].pszEnvName, szEnvValue, rc);
+ }
+ else if (rc != VERR_ENV_VAR_NOT_FOUND)
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: Error querying env.var. %s: %Rrc", s_aProps[i].pszEnvName, rc);
+
+ char *pszCfgValue;
+ rc = CFGMR3QueryStringAllocDef(pCfgDbgf, s_aProps[i].pszCfgName, &pszCfgValue, NULL);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: Querying /DBGF/%s -> %Rrc", s_aProps[i].pszCfgName, rc);
+ if (pszCfgValue)
+ {
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, s_aProps[i].enmProp, RTDBGCFGOP_PREPEND, pszCfgValue);
+ if (RT_FAILURE(rc))
+ return VMR3SetError(pUVM, rc, RT_SRC_POS,
+ "DBGF Config Error: /DBGF/%s=%s -> %Rrc", s_aProps[i].pszCfgName, pszCfgValue, rc);
+ }
+ }
+
+ /*
+ * Prepend the NoArch and VBoxDbgSyms directories to the path.
+ */
+ char szPath[RTPATH_MAX];
+ rc = RTPathAppPrivateNoArch(szPath, sizeof(szPath));
+ AssertRCReturn(rc, rc);
+#ifdef RT_OS_DARWIN
+ rc = RTPathAppend(szPath, sizeof(szPath), "../Resources/VBoxDbgSyms/");
+#else
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_PATH, RTDBGCFGOP_PREPEND, szPath);
+ AssertRCReturn(rc, rc);
+
+ rc = RTPathAppend(szPath, sizeof(szPath), "VBoxDbgSyms/");
+#endif
+ AssertRCReturn(rc, rc);
+ rc = RTDbgCfgChangeString(pUVM->dbgf.s.hDbgCfg, RTDBGCFGPROP_PATH, RTDBGCFGOP_PREPEND, szPath);
AssertRCReturn(rc, rc);
/*
@@ -149,36 +227,36 @@ int dbgfR3AsInit(PVM pVM)
RTDBGAS hDbgAs;
rc = RTDbgAsCreate(&hDbgAs, 0, RTGCPTR_MAX, "Global");
AssertRCReturn(rc, rc);
- rc = DBGFR3AsAdd(pVM, hDbgAs, NIL_RTPROCESS);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
AssertRCReturn(rc, rc);
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_GLOBAL)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_GLOBAL)] = hDbgAs;
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_KERNEL)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_KERNEL)] = hDbgAs;
rc = RTDbgAsCreate(&hDbgAs, 0, RTGCPHYS_MAX, "Physical");
AssertRCReturn(rc, rc);
- rc = DBGFR3AsAdd(pVM, hDbgAs, NIL_RTPROCESS);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
AssertRCReturn(rc, rc);
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_PHYS)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_PHYS)] = hDbgAs;
rc = RTDbgAsCreate(&hDbgAs, 0, RTRCPTR_MAX, "HyperRawMode");
AssertRCReturn(rc, rc);
- rc = DBGFR3AsAdd(pVM, hDbgAs, NIL_RTPROCESS);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
AssertRCReturn(rc, rc);
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)] = hDbgAs;
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC_AND_GC_GLOBAL)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC_AND_GC_GLOBAL)] = hDbgAs;
rc = RTDbgAsCreate(&hDbgAs, 0, RTR0PTR_MAX, "HyperRing0");
AssertRCReturn(rc, rc);
- rc = DBGFR3AsAdd(pVM, hDbgAs, NIL_RTPROCESS);
+ rc = DBGFR3AsAdd(pUVM, hDbgAs, NIL_RTPROCESS);
AssertRCReturn(rc, rc);
RTDbgAsRetain(hDbgAs);
- pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_R0)] = hDbgAs;
+ pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_R0)] = hDbgAs;
return VINF_SUCCESS;
}
@@ -206,25 +284,25 @@ static DECLCALLBACK(int) dbgfR3AsTermDestroyNode(PAVLPVNODECORE pNode, void *pvI
/**
* Terminates the address space parts of DBGF.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-void dbgfR3AsTerm(PVM pVM)
+void dbgfR3AsTerm(PUVM pUVM)
{
/*
* Create the semaphore.
*/
- int rc = RTSemRWDestroy(pVM->dbgf.s.hAsDbLock);
+ int rc = RTSemRWDestroy(pUVM->dbgf.s.hAsDbLock);
AssertRC(rc);
- pVM->dbgf.s.hAsDbLock = NIL_RTSEMRW;
+ pUVM->dbgf.s.hAsDbLock = NIL_RTSEMRW;
/*
* Release all the address spaces.
*/
- RTAvlPVDestroy(&pVM->dbgf.s.AsHandleTree, dbgfR3AsTermDestroyNode, NULL);
- for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.ahAsAliases); i++)
+ RTAvlPVDestroy(&pUVM->dbgf.s.AsHandleTree, dbgfR3AsTermDestroyNode, NULL);
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.ahAsAliases); i++)
{
- RTDbgAsRelease(pVM->dbgf.s.ahAsAliases[i]);
- pVM->dbgf.s.ahAsAliases[i] = NIL_RTDBGAS;
+ RTDbgAsRelease(pUVM->dbgf.s.ahAsAliases[i]);
+ pUVM->dbgf.s.ahAsAliases[i] = NIL_RTDBGAS;
}
}
@@ -232,13 +310,89 @@ void dbgfR3AsTerm(PVM pVM)
/**
* Relocates the RC address space.
*
- * @param pVM Pointer to the VM.
- * @param offDelta The relocation delta.
+ * @param pUVM The user mode VM handle.
+ * @param offDelta The relocation delta.
+ */
+void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta)
+{
+ /*
+ * We will relocate the raw-mode context modules by offDelta if they have
+ * been injected into the DBGF_AS_RC map.
+ */
+ if ( pUVM->dbgf.s.afAsAliasPopuplated[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)]
+ && offDelta != 0)
+ {
+ RTDBGAS hAs = pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)];
+
+ /* Take a snapshot of the modules as we might have overlapping
+ addresses between the previous and new mapping. */
+ RTDbgAsLockExcl(hAs);
+ uint32_t cModules = RTDbgAsModuleCount(hAs);
+ if (cModules > 0 && cModules < _4K)
+ {
+ struct DBGFASRELOCENTRY
+ {
+ RTDBGMOD hDbgMod;
+ RTRCPTR uOldAddr;
+ } *paEntries = (struct DBGFASRELOCENTRY *)RTMemTmpAllocZ(sizeof(paEntries[0]) * cModules);
+ if (paEntries)
+ {
+ /* Snapshot. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ paEntries[i].hDbgMod = RTDbgAsModuleByIndex(hAs, i);
+ AssertLogRelMsg(paEntries[i].hDbgMod != NIL_RTDBGMOD, ("iModule=%#x\n", i));
+
+ RTDBGASMAPINFO aMappings[1] = { { 0, 0 } };
+ uint32_t cMappings = 1;
+ int rc = RTDbgAsModuleQueryMapByIndex(hAs, i, &aMappings[0], &cMappings, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc) && cMappings == 1 && aMappings[0].iSeg == NIL_RTDBGSEGIDX)
+ paEntries[i].uOldAddr = (RTRCPTR)aMappings[0].Address;
+ else
+ AssertLogRelMsgFailed(("iModule=%#x rc=%Rrc cMappings=%#x.\n", i, rc, cMappings));
+ }
+
+ /* Unlink them. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ int rc = RTDbgAsModuleUnlink(hAs, paEntries[i].hDbgMod);
+ AssertLogRelMsg(RT_SUCCESS(rc), ("iModule=%#x rc=%Rrc hDbgMod=%p\n", i, rc, paEntries[i].hDbgMod));
+ }
+
+ /* Link them at the new locations. */
+ for (uint32_t i = 0; i < cModules; i++)
+ {
+ RTRCPTR uNewAddr = paEntries[i].uOldAddr + offDelta;
+ int rc = RTDbgAsModuleLink(hAs, paEntries[i].hDbgMod, uNewAddr,
+ RTDBGASLINK_FLAGS_REPLACE);
+ AssertLogRelMsg(RT_SUCCESS(rc),
+ ("iModule=%#x rc=%Rrc hDbgMod=%p %RRv -> %RRv\n", i, rc, paEntries[i].hDbgMod,
+ paEntries[i].uOldAddr, uNewAddr));
+ RTDbgModRelease(paEntries[i].hDbgMod);
+ }
+
+ RTMemTmpFree(paEntries);
+ }
+ else
+ AssertLogRelMsgFailed(("No memory for %#x modules.\n", cModules));
+ }
+ else
+ AssertLogRelMsgFailed(("cModules=%#x\n", cModules));
+ RTDbgAsUnlockExcl(hAs);
+ }
+}
+
+
+/**
+ * Gets the IPRT debugging configuration handle (no refs retained).
+ *
+ * @returns Config handle or NIL_RTDBGCFG.
+ * @param pUVM The user mode VM handle.
*/
-void dbgfR3AsRelocate(PVM pVM, RTGCUINTPTR offDelta)
+VMMR3DECL(RTDBGCFG) DBGFR3AsGetConfig(PUVM pUVM)
{
- /** @todo */
- NOREF(pVM); NOREF(offDelta);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGCFG);
+ return pUVM->dbgf.s.hDbgCfg;
}
@@ -246,17 +400,17 @@ void dbgfR3AsRelocate(PVM pVM, RTGCUINTPTR offDelta)
* Adds the address space to the database.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param hDbgAs The address space handle. The reference of the
- * caller will NOT be consumed.
- * @param ProcId The process id or NIL_RTPROCESS.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle. The reference of the caller
+ * will NOT be consumed.
+ * @param ProcId The process id or NIL_RTPROCESS.
*/
-VMMR3DECL(int) DBGFR3AsAdd(PVM pVM, RTDBGAS hDbgAs, RTPROCESS ProcId)
+VMMR3DECL(int) DBGFR3AsAdd(PUVM pUVM, RTDBGAS hDbgAs, RTPROCESS ProcId)
{
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
const char *pszName = RTDbgAsName(hDbgAs);
if (!pszName)
return VERR_INVALID_HANDLE;
@@ -268,26 +422,26 @@ VMMR3DECL(int) DBGFR3AsAdd(PVM pVM, RTDBGAS hDbgAs, RTPROCESS ProcId)
* Allocate a tracking node.
*/
int rc = VERR_NO_MEMORY;
- PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)MMR3HeapAlloc(pVM, MM_TAG_DBGF_AS, sizeof(*pDbNode));
+ PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_AS, sizeof(*pDbNode));
if (pDbNode)
{
pDbNode->HandleCore.Key = hDbgAs;
pDbNode->PidCore.Key = ProcId;
pDbNode->NameCore.pszString = pszName;
pDbNode->NameCore.cchString = strlen(pszName);
- DBGF_AS_DB_LOCK_WRITE(pVM);
- if (RTStrSpaceInsert(&pVM->dbgf.s.AsNameSpace, &pDbNode->NameCore))
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+ if (RTStrSpaceInsert(&pUVM->dbgf.s.AsNameSpace, &pDbNode->NameCore))
{
- if (RTAvlPVInsert(&pVM->dbgf.s.AsHandleTree, &pDbNode->HandleCore))
+ if (RTAvlPVInsert(&pUVM->dbgf.s.AsHandleTree, &pDbNode->HandleCore))
{
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
return VINF_SUCCESS;
}
/* bail out */
- RTStrSpaceRemove(&pVM->dbgf.s.AsNameSpace, pszName);
+ RTStrSpaceRemove(&pUVM->dbgf.s.AsNameSpace, pszName);
}
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
MMR3HeapFree(pDbNode);
}
RTDbgAsRelease(hDbgAs);
@@ -304,16 +458,16 @@ VMMR3DECL(int) DBGFR3AsAdd(PVM pVM, RTDBGAS hDbgAs, RTPROCESS ProcId)
* @retval VERR_SHARING_VIOLATION if in use as an alias.
* @retval VERR_NOT_FOUND if not found in the address space database.
*
- * @param pVM Pointer to the VM.
- * @param hDbgAs The address space handle. Aliases are not allowed.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle. Aliases are not allowed.
*/
-VMMR3DECL(int) DBGFR3AsDelete(PVM pVM, RTDBGAS hDbgAs)
+VMMR3DECL(int) DBGFR3AsDelete(PUVM pUVM, RTDBGAS hDbgAs)
{
/*
* Input validation. Retain the address space so it can be released outside
* the lock as well as validated.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
if (hDbgAs == NIL_RTDBGAS)
return VINF_SUCCESS;
uint32_t cRefs = RTDbgAsRetain(hDbgAs);
@@ -321,32 +475,32 @@ VMMR3DECL(int) DBGFR3AsDelete(PVM pVM, RTDBGAS hDbgAs)
return VERR_INVALID_HANDLE;
RTDbgAsRelease(hDbgAs);
- DBGF_AS_DB_LOCK_WRITE(pVM);
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
/*
* You cannot delete any of the aliases.
*/
- for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.ahAsAliases); i++)
- if (pVM->dbgf.s.ahAsAliases[i] == hDbgAs)
+ for (size_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.ahAsAliases); i++)
+ if (pUVM->dbgf.s.ahAsAliases[i] == hDbgAs)
{
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
return VERR_SHARING_VIOLATION;
}
/*
* Ok, try remove it from the database.
*/
- PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)RTAvlPVRemove(&pVM->dbgf.s.AsHandleTree, hDbgAs);
+ PDBGFASDBNODE pDbNode = (PDBGFASDBNODE)RTAvlPVRemove(&pUVM->dbgf.s.AsHandleTree, hDbgAs);
if (!pDbNode)
{
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
return VERR_NOT_FOUND;
}
- RTStrSpaceRemove(&pVM->dbgf.s.AsNameSpace, pDbNode->NameCore.pszString);
+ RTStrSpaceRemove(&pUVM->dbgf.s.AsNameSpace, pDbNode->NameCore.pszString);
if (pDbNode->PidCore.Key != NIL_RTPROCESS)
- RTAvlU32Remove(&pVM->dbgf.s.AsPidTree, pDbNode->PidCore.Key);
+ RTAvlU32Remove(&pUVM->dbgf.s.AsPidTree, pDbNode->PidCore.Key);
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
/*
* Free the resources.
@@ -365,21 +519,21 @@ VMMR3DECL(int) DBGFR3AsDelete(PVM pVM, RTDBGAS hDbgAs)
* and DBGF_AS_KERNEL.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param hAlias The alias to change.
- * @param hAliasFor The address space hAlias should be an alias for.
- * This can be an alias. The caller's reference to
- * this address space will NOT be consumed.
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The alias to change.
+ * @param hAliasFor The address space hAlias should be an alias for. This
+ * can be an alias. The caller's reference to this address
+ * space will NOT be consumed.
*/
-VMMR3DECL(int) DBGFR3AsSetAlias(PVM pVM, RTDBGAS hAlias, RTDBGAS hAliasFor)
+VMMR3DECL(int) DBGFR3AsSetAlias(PUVM pUVM, RTDBGAS hAlias, RTDBGAS hAliasFor)
{
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertMsgReturn(DBGF_AS_IS_ALIAS(hAlias), ("%p\n", hAlias), VERR_INVALID_PARAMETER);
AssertMsgReturn(!DBGF_AS_IS_FIXED_ALIAS(hAlias), ("%p\n", hAlias), VERR_INVALID_PARAMETER);
- RTDBGAS hRealAliasFor = DBGFR3AsResolveAndRetain(pVM, hAliasFor);
+ RTDBGAS hRealAliasFor = DBGFR3AsResolveAndRetain(pUVM, hAliasFor);
if (hRealAliasFor == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
@@ -387,19 +541,19 @@ VMMR3DECL(int) DBGFR3AsSetAlias(PVM pVM, RTDBGAS hAlias, RTDBGAS hAliasFor)
* Make sure the handle has is already in the database.
*/
int rc = VERR_NOT_FOUND;
- DBGF_AS_DB_LOCK_WRITE(pVM);
- if (RTAvlPVGet(&pVM->dbgf.s.AsHandleTree, hRealAliasFor))
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
+ if (RTAvlPVGet(&pUVM->dbgf.s.AsHandleTree, hRealAliasFor))
{
/*
* Update the alias table and release the current address space.
*/
RTDBGAS hAsOld;
- ASMAtomicXchgHandle(&pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(hAlias)], hRealAliasFor, &hAsOld);
+ ASMAtomicXchgHandle(&pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(hAlias)], hRealAliasFor, &hAsOld);
uint32_t cRefs = RTDbgAsRelease(hAsOld);
Assert(cRefs > 0); Assert(cRefs != UINT32_MAX); NOREF(cRefs);
rc = VINF_SUCCESS;
}
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
return rc;
}
@@ -409,15 +563,15 @@ VMMR3DECL(int) DBGFR3AsSetAlias(PVM pVM, RTDBGAS hAlias, RTDBGAS hAliasFor)
* @callback_method_impl{FNPDMR3ENUM}
*/
static DECLCALLBACK(int) dbgfR3AsLazyPopulateR0Callback(PVM pVM, const char *pszFilename, const char *pszName,
- RTUINTPTR ImageBase, size_t cbImage, bool fRC, void *pvArg)
+ RTUINTPTR ImageBase, size_t cbImage, PDMLDRCTX enmCtx, void *pvArg)
{
NOREF(pVM); NOREF(cbImage);
/* Only ring-0 modules. */
- if (!fRC)
+ if (enmCtx == PDMLDRCTX_RING_0)
{
RTDBGMOD hDbgMod;
- int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszName, 0 /*fFlags*/);
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszName, RTLDRARCH_HOST, pVM->pUVM->dbgf.s.hDbgCfg);
if (RT_SUCCESS(rc))
{
rc = RTDbgAsModuleLink((RTDBGAS)pvArg, hDbgMod, ImageBase, 0 /*fFlags*/);
@@ -434,25 +588,64 @@ static DECLCALLBACK(int) dbgfR3AsLazyPopulateR0Callback(PVM pVM, const char *psz
/**
+ * @callback_method_impl{FNPDMR3ENUM}
+ */
+static DECLCALLBACK(int) dbgfR3AsLazyPopulateRCCallback(PVM pVM, const char *pszFilename, const char *pszName,
+ RTUINTPTR ImageBase, size_t cbImage, PDMLDRCTX enmCtx, void *pvArg)
+{
+ NOREF(pVM); NOREF(cbImage);
+
+ /* Only raw-mode modules. */
+ if (enmCtx == PDMLDRCTX_RAW_MODE)
+ {
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszName, RTLDRARCH_X86_32, pVM->pUVM->dbgf.s.hDbgCfg);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTDbgAsModuleLink((RTDBGAS)pvArg, hDbgMod, ImageBase, 0 /*fFlags*/);
+ if (RT_FAILURE(rc))
+ LogRel(("DBGF: Failed to link module \"%s\" into DBGF_AS_RC at %RTptr: %Rrc\n",
+ pszName, ImageBase, rc));
+ }
+ else
+ LogRel(("DBGF: RTDbgModCreateFromImage failed with rc=%Rrc for module \"%s\" (%s)\n",
+ rc, pszName, pszFilename));
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
* Lazily populates the specified address space.
*
- * @param pVM Pointer to the VM.
- * @param hAlias The alias.
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The alias.
*/
-static void dbgfR3AsLazyPopulate(PVM pVM, RTDBGAS hAlias)
+static void dbgfR3AsLazyPopulate(PUVM pUVM, RTDBGAS hAlias)
{
- DBGF_AS_DB_LOCK_WRITE(pVM);
+ DBGF_AS_DB_LOCK_WRITE(pUVM);
uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
- if (!pVM->dbgf.s.afAsAliasPopuplated[iAlias])
+ if (!pUVM->dbgf.s.afAsAliasPopuplated[iAlias])
{
- RTDBGAS hAs = pVM->dbgf.s.ahAsAliases[iAlias];
- if (hAlias == DBGF_AS_R0)
- PDMR3LdrEnumModules(pVM, dbgfR3AsLazyPopulateR0Callback, hAs);
- /** @todo what do we do about DBGF_AS_RC? */
+ RTDBGAS hDbgAs = pUVM->dbgf.s.ahAsAliases[iAlias];
+ if (hAlias == DBGF_AS_R0 && pUVM->pVM)
+ PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateR0Callback, hDbgAs);
+ else if (hAlias == DBGF_AS_RC && pUVM->pVM && !HMIsEnabled(pUVM->pVM))
+ {
+ LogRel(("DBGF: Lazy init of RC address space\n"));
+ PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateRCCallback, hDbgAs);
+#ifdef VBOX_WITH_RAW_MODE
+ PATMR3DbgPopulateAddrSpace(pUVM->pVM, hDbgAs);
+#endif
+ }
+ else if (hAlias == DBGF_AS_PHYS && pUVM->pVM)
+ {
+ /** @todo Lazy load pc and vga bios symbols or the EFI stuff. */
+ }
- pVM->dbgf.s.afAsAliasPopuplated[iAlias] = true;
+ pUVM->dbgf.s.afAsAliasPopuplated[iAlias] = true;
}
- DBGF_AS_DB_UNLOCK_WRITE(pVM);
+ DBGF_AS_DB_UNLOCK_WRITE(pUVM);
}
@@ -461,19 +654,19 @@ static void dbgfR3AsLazyPopulate(PVM pVM, RTDBGAS hAlias)
*
* @returns Real address space handle. NIL_RTDBGAS if invalid handle.
*
- * @param pVM Pointer to the VM.
- * @param hAlias The possibly address space alias.
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The possibly address space alias.
*
* @remarks Doesn't take any locks.
*/
-VMMR3DECL(RTDBGAS) DBGFR3AsResolve(PVM pVM, RTDBGAS hAlias)
+VMMR3DECL(RTDBGAS) DBGFR3AsResolve(PUVM pUVM, RTDBGAS hAlias)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
AssertCompileNS(NIL_RTDBGAS == (RTDBGAS)0);
uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
if (iAlias < DBGF_AS_COUNT)
- ASMAtomicReadHandle(&pVM->dbgf.s.ahAsAliases[iAlias], &hAlias);
+ ASMAtomicReadHandle(&pUVM->dbgf.s.ahAsAliases[iAlias], &hAlias);
return hAlias;
}
@@ -484,12 +677,12 @@ VMMR3DECL(RTDBGAS) DBGFR3AsResolve(PVM pVM, RTDBGAS hAlias)
*
* @returns Real address space handle. NIL_RTDBGAS if invalid handle.
*
- * @param pVM Pointer to the VM.
- * @param hAlias The possibly address space alias.
+ * @param pUVM The user mode VM handle.
+ * @param hAlias The possibly address space alias.
*/
-VMMR3DECL(RTDBGAS) DBGFR3AsResolveAndRetain(PVM pVM, RTDBGAS hAlias)
+VMMR3DECL(RTDBGAS) DBGFR3AsResolveAndRetain(PUVM pUVM, RTDBGAS hAlias)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
AssertCompileNS(NIL_RTDBGAS == (RTDBGAS)0);
uint32_t cRefs;
@@ -499,20 +692,20 @@ VMMR3DECL(RTDBGAS) DBGFR3AsResolveAndRetain(PVM pVM, RTDBGAS hAlias)
if (DBGF_AS_IS_FIXED_ALIAS(hAlias))
{
/* Perform lazy address space population. */
- if (!pVM->dbgf.s.afAsAliasPopuplated[iAlias])
- dbgfR3AsLazyPopulate(pVM, hAlias);
+ if (!pUVM->dbgf.s.afAsAliasPopuplated[iAlias])
+ dbgfR3AsLazyPopulate(pUVM, hAlias);
/* Won't ever change, no need to grab the lock. */
- hAlias = pVM->dbgf.s.ahAsAliases[iAlias];
+ hAlias = pUVM->dbgf.s.ahAsAliases[iAlias];
cRefs = RTDbgAsRetain(hAlias);
}
else
{
/* May change, grab the lock so we can read it safely. */
- DBGF_AS_DB_LOCK_READ(pVM);
- hAlias = pVM->dbgf.s.ahAsAliases[iAlias];
+ DBGF_AS_DB_LOCK_READ(pUVM);
+ hAlias = pUVM->dbgf.s.ahAsAliases[iAlias];
cRefs = RTDbgAsRetain(hAlias);
- DBGF_AS_DB_UNLOCK_READ(pVM);
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
}
}
else
@@ -528,15 +721,15 @@ VMMR3DECL(RTDBGAS) DBGFR3AsResolveAndRetain(PVM pVM, RTDBGAS hAlias)
*
* @returns Retained address space handle if found, NIL_RTDBGAS if not.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The name.
*/
-VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PVM pVM, const char *pszName)
+VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PUVM pUVM, const char *pszName)
{
/*
* Validate the input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_RTDBGAS);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGAS);
AssertPtrReturn(pszName, NIL_RTDBGAS);
AssertReturn(*pszName, NIL_RTDBGAS);
@@ -544,9 +737,9 @@ VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PVM pVM, const char *pszName)
* Look it up in the string space and retain the result.
*/
RTDBGAS hDbgAs = NIL_RTDBGAS;
- DBGF_AS_DB_LOCK_READ(pVM);
+ DBGF_AS_DB_LOCK_READ(pUVM);
- PRTSTRSPACECORE pNode = RTStrSpaceGet(&pVM->dbgf.s.AsNameSpace, pszName);
+ PRTSTRSPACECORE pNode = RTStrSpaceGet(&pUVM->dbgf.s.AsNameSpace, pszName);
if (pNode)
{
PDBGFASDBNODE pDbNode = RT_FROM_MEMBER(pNode, DBGFASDBNODE, NameCore);
@@ -555,8 +748,8 @@ VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PVM pVM, const char *pszName)
if (RT_UNLIKELY(cRefs == UINT32_MAX))
hDbgAs = NIL_RTDBGAS;
}
- DBGF_AS_DB_UNLOCK_READ(pVM);
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
return hDbgAs;
}
@@ -566,24 +759,24 @@ VMMR3DECL(RTDBGAS) DBGFR3AsQueryByName(PVM pVM, const char *pszName)
*
* @returns Retained address space handle if found, NIL_RTDBGAS if not.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param ProcId The process ID.
*/
-VMMR3DECL(RTDBGAS) DBGFR3AsQueryByPid(PVM pVM, RTPROCESS ProcId)
+VMMR3DECL(RTDBGAS) DBGFR3AsQueryByPid(PUVM pUVM, RTPROCESS ProcId)
{
/*
* Validate the input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_RTDBGAS);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NIL_RTDBGAS);
AssertReturn(ProcId != NIL_RTPROCESS, NIL_RTDBGAS);
/*
* Look it up in the PID tree and retain the result.
*/
RTDBGAS hDbgAs = NIL_RTDBGAS;
- DBGF_AS_DB_LOCK_READ(pVM);
+ DBGF_AS_DB_LOCK_READ(pUVM);
- PAVLU32NODECORE pNode = RTAvlU32Get(&pVM->dbgf.s.AsPidTree, ProcId);
+ PAVLU32NODECORE pNode = RTAvlU32Get(&pUVM->dbgf.s.AsPidTree, ProcId);
if (pNode)
{
PDBGFASDBNODE pDbNode = RT_FROM_MEMBER(pNode, DBGFASDBNODE, PidCore);
@@ -592,7 +785,7 @@ VMMR3DECL(RTDBGAS) DBGFR3AsQueryByPid(PVM pVM, RTPROCESS ProcId)
if (RT_UNLIKELY(cRefs == UINT32_MAX))
hDbgAs = NIL_RTDBGAS;
}
- DBGF_AS_DB_UNLOCK_READ(pVM);
+ DBGF_AS_DB_UNLOCK_READ(pUVM);
return hDbgAs;
}
@@ -716,15 +909,17 @@ static int dbgfR3AsSearchEnvPath(const char *pszFilename, const char *pszEnvVar,
* Nothing is done if the CFGM variable isn't set.
*
* @returns VBox status code.
+ * @param pUVM The user mode VM handle.
* @param pszFilename The filename.
* @param pszCfgValue The name of the config variable (under /DBGF/).
* @param pfnOpen The open callback function.
* @param pvUser User argument for the callback.
*/
-static int dbgfR3AsSearchCfgPath(PVM pVM, const char *pszFilename, const char *pszCfgValue, PFNDBGFR3ASSEARCHOPEN pfnOpen, void *pvUser)
+static int dbgfR3AsSearchCfgPath(PUVM pUVM, const char *pszFilename, const char *pszCfgValue,
+ PFNDBGFR3ASSEARCHOPEN pfnOpen, void *pvUser)
{
char *pszPath;
- int rc = CFGMR3QueryStringAllocDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "/DBGF"), pszCfgValue, &pszPath, NULL);
+ int rc = CFGMR3QueryStringAllocDef(CFGMR3GetChild(CFGMR3GetRootU(pUVM), "/DBGF"), pszCfgValue, &pszPath, NULL);
if (RT_FAILURE(rc))
return rc;
if (!pszPath)
@@ -736,20 +931,6 @@ static int dbgfR3AsSearchCfgPath(PVM pVM, const char *pszFilename, const char *p
/**
- * Callback function used by DBGFR3AsLoadImage.
- *
- * @returns VBox status code.
- * @param pszFilename The filename under evaluation.
- * @param pvUser Use arguments (DBGFR3ASLOADOPENDATA).
- */
-static DECLCALLBACK(int) dbgfR3AsLoadImageOpen(const char *pszFilename, void *pvUser)
-{
- DBGFR3ASLOADOPENDATA *pData = (DBGFR3ASLOADOPENDATA *)pvUser;
- return RTDbgModCreateFromImage(&pData->hMod, pszFilename, pData->pszModName, pData->fFlags);
-}
-
-
-/**
* Load symbols from an executable module into the specified address space.
*
* If an module exist at the specified address it will be replaced by this
@@ -757,49 +938,40 @@ static DECLCALLBACK(int) dbgfR3AsLoadImageOpen(const char *pszFilename, void *pv
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param hDbgAs The address space.
* @param pszFilename The filename of the executable module.
 * @param pszModName The module name. If NULL, then the file name
* base is used (no extension or nothing).
+ * @param enmArch The desired architecture, use RTLDRARCH_WHATEVER if
+ * it's not relevant or known.
* @param pModAddress The load address of the module.
* @param iModSeg The segment to load, pass NIL_RTDBGSEGIDX to load
* the whole image.
* @param fFlags Flags reserved for future extensions, must be 0.
*/
-VMMR3DECL(int) DBGFR3AsLoadImage(PVM pVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName, PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, uint32_t fFlags)
+VMMR3DECL(int) DBGFR3AsLoadImage(PUVM pUVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName, RTLDRARCH enmArch,
+ PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, uint32_t fFlags)
{
/*
* Validate input
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
- AssertReturn(DBGFR3AddrIsValid(pVM, pModAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
- RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pVM, hDbgAs);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
if (hRealAS == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
- /*
- * Do the work.
- */
- DBGFR3ASLOADOPENDATA Data;
- Data.pszModName = pszModName;
- Data.uSubtrahend = 0;
- Data.fFlags = 0;
- Data.hMod = NIL_RTDBGMOD;
- int rc = dbgfR3AsSearchCfgPath(pVM, pszFilename, "ImagePath", dbgfR3AsLoadImageOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchEnvPath(pszFilename, "VBOXDBG_IMAGE_PATH", dbgfR3AsLoadImageOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchCfgPath(pVM, pszFilename, "Path", dbgfR3AsLoadImageOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchEnvPath(pszFilename, "VBOXDBG_PATH", dbgfR3AsLoadImageOpen, &Data);
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromImage(&hDbgMod, pszFilename, pszModName, enmArch, pUVM->dbgf.s.hDbgCfg);
if (RT_SUCCESS(rc))
{
- rc = DBGFR3AsLinkModule(pVM, hRealAS, Data.hMod, pModAddress, iModSeg, 0);
+ rc = DBGFR3AsLinkModule(pUVM, hRealAS, hDbgMod, pModAddress, iModSeg, 0);
if (RT_FAILURE(rc))
- RTDbgModRelease(Data.hMod);
+ RTDbgModRelease(hDbgMod);
}
RTDbgAsRelease(hRealAS);
@@ -808,20 +980,6 @@ VMMR3DECL(int) DBGFR3AsLoadImage(PVM pVM, RTDBGAS hDbgAs, const char *pszFilenam
/**
- * Callback function used by DBGFR3AsLoadMap.
- *
- * @returns VBox status code.
- * @param pszFilename The filename under evaluation.
- * @param pvUser Use arguments (DBGFR3ASLOADOPENDATA).
- */
-static DECLCALLBACK(int) dbgfR3AsLoadMapOpen(const char *pszFilename, void *pvUser)
-{
- DBGFR3ASLOADOPENDATA *pData = (DBGFR3ASLOADOPENDATA *)pvUser;
- return RTDbgModCreateFromMap(&pData->hMod, pszFilename, pData->pszModName, pData->uSubtrahend, pData->fFlags);
-}
-
-
-/**
* Load symbols from a map file into a module at the specified address space.
*
* If an module exist at the specified address it will be replaced by this
@@ -829,7 +987,7 @@ static DECLCALLBACK(int) dbgfR3AsLoadMapOpen(const char *pszFilename, void *pvUs
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param hDbgAs The address space.
* @param pszFilename The map file.
 * @param pszModName The module name. If NULL, then the file name
@@ -842,40 +1000,28 @@ static DECLCALLBACK(int) dbgfR3AsLoadMapOpen(const char *pszFilename, void *pvUs
* /proc/kallsyms.
* @param fFlags Flags reserved for future extensions, must be 0.
*/
-VMMR3DECL(int) DBGFR3AsLoadMap(PVM pVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName,
+VMMR3DECL(int) DBGFR3AsLoadMap(PUVM pUVM, RTDBGAS hDbgAs, const char *pszFilename, const char *pszModName,
PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, RTGCUINTPTR uSubtrahend, uint32_t fFlags)
{
/*
* Validate input
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
- AssertReturn(DBGFR3AddrIsValid(pVM, pModAddress), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
- RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pVM, hDbgAs);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
if (hRealAS == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
- /*
- * Do the work.
- */
- DBGFR3ASLOADOPENDATA Data;
- Data.pszModName = pszModName;
- Data.uSubtrahend = uSubtrahend;
- Data.fFlags = 0;
- Data.hMod = NIL_RTDBGMOD;
- int rc = dbgfR3AsSearchCfgPath(pVM, pszFilename, "MapPath", dbgfR3AsLoadMapOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchEnvPath(pszFilename, "VBOXDBG_MAP_PATH", dbgfR3AsLoadMapOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchCfgPath(pVM, pszFilename, "Path", dbgfR3AsLoadMapOpen, &Data);
- if (RT_FAILURE(rc))
- rc = dbgfR3AsSearchEnvPath(pszFilename, "VBOXDBG_PATH", dbgfR3AsLoadMapOpen, &Data);
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreateFromMap(&hDbgMod, pszFilename, pszModName, uSubtrahend, pUVM->dbgf.s.hDbgCfg);
if (RT_SUCCESS(rc))
{
- rc = DBGFR3AsLinkModule(pVM, hRealAS, Data.hMod, pModAddress, iModSeg, 0);
+ rc = DBGFR3AsLinkModule(pUVM, hRealAS, hDbgMod, pModAddress, iModSeg, 0);
if (RT_FAILURE(rc))
- RTDbgModRelease(Data.hMod);
+ RTDbgModRelease(hDbgMod);
}
RTDbgAsRelease(hRealAS);
@@ -887,21 +1033,22 @@ VMMR3DECL(int) DBGFR3AsLoadMap(PVM pVM, RTDBGAS hDbgAs, const char *pszFilename,
* Wrapper around RTDbgAsModuleLink, RTDbgAsModuleLinkSeg and DBGFR3AsResolve.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param hDbgAs The address space handle.
* @param hMod The module handle.
* @param pModAddress The link address.
* @param iModSeg The segment to link, NIL_RTDBGSEGIDX for the entire image.
* @param fFlags Flags to pass to the link functions, see RTDBGASLINK_FLAGS_*.
*/
-VMMR3DECL(int) DBGFR3AsLinkModule(PVM pVM, RTDBGAS hDbgAs, RTDBGMOD hMod, PCDBGFADDRESS pModAddress, RTDBGSEGIDX iModSeg, uint32_t fFlags)
+VMMR3DECL(int) DBGFR3AsLinkModule(PUVM pUVM, RTDBGAS hDbgAs, RTDBGMOD hMod, PCDBGFADDRESS pModAddress,
+ RTDBGSEGIDX iModSeg, uint32_t fFlags)
{
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(DBGFR3AddrIsValid(pVM, pModAddress), VERR_INVALID_PARAMETER);
- RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pVM, hDbgAs);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pModAddress), VERR_INVALID_PARAMETER);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
if (hRealAS == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
@@ -920,6 +1067,54 @@ VMMR3DECL(int) DBGFR3AsLinkModule(PVM pVM, RTDBGAS hDbgAs, RTDBGMOD hMod, PCDBGF
/**
+ * Wrapper around RTDbgAsModuleByName and RTDbgAsModuleUnlink.
+ *
+ * Unlinks all mappings matching the given module name.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pszModName The name of the module to unlink.
+ */
+VMMR3DECL(int) DBGFR3AsUnlinkModuleByName(PUVM pUVM, RTDBGAS hDbgAs, const char *pszModName)
+{
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the job.
+ */
+ RTDBGMOD hMod;
+ int rc = RTDbgAsModuleByName(hRealAS, pszModName, 0, &hMod);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ rc = RTDbgAsModuleUnlink(hRealAS, hMod);
+ RTDbgModRelease(hMod);
+ if (RT_FAILURE(rc))
+ break;
+ rc = RTDbgAsModuleByName(hRealAS, pszModName, 0, &hMod);
+ if (RT_FAILURE_NP(rc))
+ {
+ if (rc == VERR_NOT_FOUND)
+ rc = VINF_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ RTDbgAsRelease(hRealAS);
+ return rc;
+}
+
+
+/**
* Adds the module name to the symbol name.
*
* @param pSymbol The symbol info (in/out).
@@ -966,17 +1161,18 @@ static void dbgfR3AsSymbolConvert(PRTDBGSYMBOL pSymbol, PCDBGFSYMBOL pDbgfSym)
*
* @returns VBox status code. See RTDbgAsSymbolByAddr.
*
- * @param pVM Pointer to the VM.
- * @param hDbgAs The address space handle.
- * @param pAddress The address to lookup.
- * @param poffDisp Where to return the distance between the
- * returned symbol and pAddress. Optional.
- * @param pSymbol Where to return the symbol information.
- * The returned symbol name will be prefixed by
- * the module name as far as space allows.
- * @param phMod Where to return the module handle. Optional.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pAddress The address to lookup.
+ * @param fFlags One of the RTDBGSYMADDR_FLAGS_XXX flags.
+ * @param poffDisp Where to return the distance between the returned
+ * symbol and pAddress. Optional.
+ * @param pSymbol Where to return the symbol information. The returned
+ * symbol name will be prefixed by the module name as
+ * far as space allows.
+ * @param phMod Where to return the module handle. Optional.
*/
-VMMR3DECL(int) DBGFR3AsSymbolByAddr(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress,
+VMMR3DECL(int) DBGFR3AsSymbolByAddr(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress, uint32_t fFlags,
PRTGCINTPTR poffDisp, PRTDBGSYMBOL pSymbol, PRTDBGMOD phMod)
{
/*
@@ -984,17 +1180,17 @@ VMMR3DECL(int) DBGFR3AsSymbolByAddr(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddr
*/
if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
{
- int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_RC, pAddress, poffDisp, pSymbol, phMod);
+ int rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_RC, pAddress, fFlags, poffDisp, pSymbol, phMod);
if (RT_FAILURE(rc))
- rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, pAddress, poffDisp, pSymbol, phMod);
+ rc = DBGFR3AsSymbolByAddr(pUVM, DBGF_AS_GLOBAL, pAddress, fFlags, poffDisp, pSymbol, phMod);
return rc;
}
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(DBGFR3AddrIsValid(pVM, pAddress), VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
AssertPtrNullReturn(poffDisp, VERR_INVALID_POINTER);
AssertPtrReturn(pSymbol, VERR_INVALID_POINTER);
AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
@@ -1002,7 +1198,7 @@ VMMR3DECL(int) DBGFR3AsSymbolByAddr(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddr
*poffDisp = 0;
if (phMod)
*phMod = NIL_RTDBGMOD;
- RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pVM, hDbgAs);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
if (hRealAS == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
@@ -1010,49 +1206,13 @@ VMMR3DECL(int) DBGFR3AsSymbolByAddr(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddr
* Do the lookup.
*/
RTDBGMOD hMod;
- int rc = RTDbgAsSymbolByAddr(hRealAS, pAddress->FlatPtr, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, poffDisp, pSymbol, &hMod);
+ int rc = RTDbgAsSymbolByAddr(hRealAS, pAddress->FlatPtr, fFlags, poffDisp, pSymbol, &hMod);
if (RT_SUCCESS(rc))
{
dbgfR3AsSymbolJoinNames(pSymbol, hMod);
if (!phMod)
RTDbgModRelease(hMod);
}
- /* Temporary conversions. */
- else if (hDbgAs == DBGF_AS_GLOBAL)
- {
- DBGFSYMBOL DbgfSym;
- rc = DBGFR3SymbolByAddr(pVM, pAddress->FlatPtr, poffDisp, &DbgfSym);
- if (RT_SUCCESS(rc))
- dbgfR3AsSymbolConvert(pSymbol, &DbgfSym);
- }
- else if (hDbgAs == DBGF_AS_R0)
- {
- RTR0PTR R0PtrMod;
- char szNearSym[260];
- RTR0PTR R0PtrNearSym;
- RTR0PTR R0PtrNearSym2;
- rc = PDMR3LdrQueryR0ModFromPC(pVM, pAddress->FlatPtr,
- pSymbol->szName, sizeof(pSymbol->szName) / 2, &R0PtrMod,
- &szNearSym[0], sizeof(szNearSym), &R0PtrNearSym,
- NULL, 0, &R0PtrNearSym2);
- if (RT_SUCCESS(rc))
- {
- pSymbol->offSeg = pSymbol->Value = R0PtrNearSym;
- pSymbol->cb = R0PtrNearSym2 > R0PtrNearSym ? R0PtrNearSym2 - R0PtrNearSym : 0;
- pSymbol->iSeg = 0;
- pSymbol->fFlags = 0;
- pSymbol->iOrdinal = UINT32_MAX;
- size_t offName = strlen(pSymbol->szName);
- pSymbol->szName[offName++] = '!';
- size_t cchNearSym = strlen(szNearSym);
- if (cchNearSym + offName >= sizeof(pSymbol->szName))
- cchNearSym = sizeof(pSymbol->szName) - offName - 1;
- strncpy(&pSymbol->szName[offName], szNearSym, cchNearSym);
- pSymbol->szName[offName + cchNearSym] = '\0';
- if (poffDisp)
- *poffDisp = pAddress->FlatPtr - pSymbol->Value;
- }
- }
return rc;
}
@@ -1065,16 +1225,18 @@ VMMR3DECL(int) DBGFR3AsSymbolByAddr(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddr
* RTDbgSymbolFree(). NULL is returned if not found or any error
* occurs.
*
- * @param pVM Pointer to the VM.
- * @param hDbgAs See DBGFR3AsSymbolByAddr.
- * @param pAddress See DBGFR3AsSymbolByAddr.
- * @param poffDisp See DBGFR3AsSymbolByAddr.
- * @param phMod See DBGFR3AsSymbolByAddr.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs See DBGFR3AsSymbolByAddr.
+ * @param pAddress See DBGFR3AsSymbolByAddr.
+ * @param fFlags See DBGFR3AsSymbolByAddr.
+ * @param poffDisp See DBGFR3AsSymbolByAddr.
+ * @param phMod See DBGFR3AsSymbolByAddr.
*/
-VMMR3DECL(PRTDBGSYMBOL) DBGFR3AsSymbolByAddrA(PVM pVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress, PRTGCINTPTR poffDisp, PRTDBGMOD phMod)
+VMMR3DECL(PRTDBGSYMBOL) DBGFR3AsSymbolByAddrA(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress, uint32_t fFlags,
+ PRTGCINTPTR poffDisp, PRTDBGMOD phMod)
{
RTDBGSYMBOL SymInfo;
- int rc = DBGFR3AsSymbolByAddr(pVM, hDbgAs, pAddress, poffDisp, &SymInfo, phMod);
+ int rc = DBGFR3AsSymbolByAddr(pUVM, hDbgAs, pAddress, fFlags, poffDisp, &SymInfo, phMod);
if (RT_SUCCESS(rc))
return RTDbgSymbolDup(&SymInfo);
return NULL;
@@ -1090,16 +1252,16 @@ VMMR3DECL(PRTDBGSYMBOL) DBGFR3AsSymbolByAddrA(PVM pVM, RTDBGAS hDbgAs, PCDBGFADD
*
* @returns VBox status code. See RTDbgAsSymbolByAddr.
*
- * @param pVM Pointer to the VM.
- * @param hDbgAs The address space handle.
- * @param pszSymbol The symbol to search for, maybe prefixed by a
- * module pattern.
- * @param pSymbol Where to return the symbol information.
- * The returned symbol name will be prefixed by
- * the module name as far as space allows.
- * @param phMod Where to return the module handle. Optional.
+ * @param pUVM The user mode VM handle.
+ * @param hDbgAs The address space handle.
+ * @param pszSymbol The symbol to search for, may be prefixed by a
+ * module pattern.
+ * @param pSymbol Where to return the symbol information.
+ * The returned symbol name will be prefixed by
+ * the module name as far as space allows.
+ * @param phMod Where to return the module handle. Optional.
*/
-VMMR3DECL(int) DBGFR3AsSymbolByName(PVM pVM, RTDBGAS hDbgAs, const char *pszSymbol,
+VMMR3DECL(int) DBGFR3AsSymbolByName(PUVM pUVM, RTDBGAS hDbgAs, const char *pszSymbol,
PRTDBGSYMBOL pSymbol, PRTDBGMOD phMod)
{
/*
@@ -1107,21 +1269,21 @@ VMMR3DECL(int) DBGFR3AsSymbolByName(PVM pVM, RTDBGAS hDbgAs, const char *pszSymb
*/
if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
{
- int rc = DBGFR3AsSymbolByName(pVM, DBGF_AS_RC, pszSymbol, pSymbol, phMod);
+ int rc = DBGFR3AsSymbolByName(pUVM, DBGF_AS_RC, pszSymbol, pSymbol, phMod);
if (RT_FAILURE(rc))
- rc = DBGFR3AsSymbolByName(pVM, DBGF_AS_GLOBAL, pszSymbol, pSymbol, phMod);
+ rc = DBGFR3AsSymbolByName(pUVM, DBGF_AS_GLOBAL, pszSymbol, pSymbol, phMod);
return rc;
}
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pSymbol, VERR_INVALID_POINTER);
AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
if (phMod)
*phMod = NIL_RTDBGMOD;
- RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pVM, hDbgAs);
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
if (hRealAS == NIL_RTDBGAS)
return VERR_INVALID_HANDLE;
@@ -1137,15 +1299,55 @@ VMMR3DECL(int) DBGFR3AsSymbolByName(PVM pVM, RTDBGAS hDbgAs, const char *pszSymb
if (!phMod)
RTDbgModRelease(hMod);
}
- /* Temporary conversion. */
- else if (hDbgAs == DBGF_AS_GLOBAL)
+
+ return rc;
+}
+
+
+VMMR3DECL(int) DBGFR3AsLineByAddr(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress,
+ PRTGCINTPTR poffDisp, PRTDBGLINE pLine, PRTDBGMOD phMod)
+{
+ /*
+ * Implement the special address space aliases the lazy way.
+ */
+ if (hDbgAs == DBGF_AS_RC_AND_GC_GLOBAL)
{
- DBGFSYMBOL DbgfSym;
- rc = DBGFR3SymbolByName(pVM, pszSymbol, &DbgfSym);
- if (RT_SUCCESS(rc))
- dbgfR3AsSymbolConvert(pSymbol, &DbgfSym);
+ int rc = DBGFR3AsLineByAddr(pUVM, DBGF_AS_RC, pAddress, poffDisp, pLine, phMod);
+ if (RT_FAILURE(rc))
+ rc = DBGFR3AsLineByAddr(pUVM, DBGF_AS_GLOBAL, pAddress, poffDisp, pLine, phMod);
+ return rc;
}
- return rc;
+ /*
+ * Input validation.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(poffDisp, VERR_INVALID_POINTER);
+ AssertPtrReturn(pLine, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(phMod, VERR_INVALID_POINTER);
+ if (poffDisp)
+ *poffDisp = 0;
+ if (phMod)
+ *phMod = NIL_RTDBGMOD;
+ RTDBGAS hRealAS = DBGFR3AsResolveAndRetain(pUVM, hDbgAs);
+ if (hRealAS == NIL_RTDBGAS)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Do the lookup.
+ */
+ return RTDbgAsLineByAddr(hRealAS, pAddress->FlatPtr, poffDisp, pLine, phMod);
+}
+
+
+VMMR3DECL(PRTDBGLINE) DBGFR3AsLineByAddrA(PUVM pUVM, RTDBGAS hDbgAs, PCDBGFADDRESS pAddress,
+ PRTGCINTPTR poffDisp, PRTDBGMOD phMod)
+{
+ RTDBGLINE Line;
+ int rc = DBGFR3AsLineByAddr(pUVM, hDbgAs, pAddress, poffDisp, &Line, phMod);
+ if (RT_SUCCESS(rc))
+ return RTDbgLineDup(&Line);
+ return NULL;
}
diff --git a/src/VBox/VMM/VMMR3/DBGFBp.cpp b/src/VBox/VMM/VMMR3/DBGFBp.cpp
index f0a553ff..ff7c4325 100644
--- a/src/VBox/VMM/VMMR3/DBGFBp.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFBp.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -27,9 +27,11 @@
#else
# include <VBox/vmm/iem.h>
#endif
+#include <VBox/vmm/mm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/mm.h>
+#include <VBox/vmm/uvm.h>
+
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
@@ -40,18 +42,8 @@
* Internal Functions *
*******************************************************************************/
RT_C_DECLS_BEGIN
-static DECLCALLBACK(int) dbgfR3BpSetReg(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable,
- uint8_t u8Type, uint8_t cb, uint32_t *piBp);
-static DECLCALLBACK(int) dbgfR3BpSetInt3(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable, uint32_t *piBp);
-static DECLCALLBACK(int) dbgfR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable, uint32_t *piBp);
-static DECLCALLBACK(int) dbgfR3BpClear(PVM pVM, uint32_t iBp);
-static DECLCALLBACK(int) dbgfR3BpEnable(PVM pVM, uint32_t iBp);
-static DECLCALLBACK(int) dbgfR3BpDisable(PVM pVM, uint32_t iBp);
-static DECLCALLBACK(int) dbgfR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *pvUser);
static int dbgfR3BpRegArm(PVM pVM, PDBGFBP pBp);
-static int dbgfR3BpRegDisarm(PVM pVM, PDBGFBP pBp);
-static int dbgfR3BpInt3Arm(PVM pVM, PDBGFBP pBp);
-static int dbgfR3BpInt3Disarm(PVM pVM, PDBGFBP pBp);
+static int dbgfR3BpInt3Arm(PUVM pUVM, PDBGFBP pBp);
RT_C_DECLS_END
@@ -278,32 +270,7 @@ static void dbgfR3BpFree(PVM pVM, PDBGFBP pBp)
* Sets a breakpoint (int 3 based).
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pAddress The address of the breakpoint.
- * @param iHitTrigger The hit count at which the breakpoint start triggering.
- * Use 0 (or 1) if it's gonna trigger at once.
- * @param iHitDisable The hit count which disables the breakpoint.
- * Use ~(uint64_t) if it's never gonna be disabled.
- * @param piBp Where to store the breakpoint id. (optional)
- * @thread Any thread.
- */
-VMMR3DECL(int) DBGFR3BpSet(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable, uint32_t *piBp)
-{
- /*
- * This must be done on EMT.
- */
- /** @todo SMP? */
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetInt3, 5, pVM, pAddress, &iHitTrigger, &iHitDisable, piBp);
- LogFlow(("DBGFR3BpSet: returns %Rrc\n", rc));
- return rc;
-}
-
-
-/**
- * Sets a breakpoint (int 3 based).
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pAddress The address of the breakpoint.
* @param piHitTrigger The hit count at which the breakpoint start triggering.
* Use 0 (or 1) if it's gonna trigger at once.
@@ -312,12 +279,15 @@ VMMR3DECL(int) DBGFR3BpSet(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger
* @param piBp Where to store the breakpoint id. (optional)
* @thread Any thread.
*/
-static DECLCALLBACK(int) dbgfR3BpSetInt3(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable, uint32_t *piBp)
+static DECLCALLBACK(int) dbgfR3BpSetInt3(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger,
+ uint64_t *piHitDisable, uint32_t *piBp)
{
/*
* Validate input.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_PARAMETER;
if (*piHitTrigger > *piHitDisable)
return VERR_INVALID_PARAMETER;
@@ -333,7 +303,7 @@ static DECLCALLBACK(int) dbgfR3BpSetInt3(PVM pVM, PCDBGFADDRESS pAddress, uint64
{
int rc = VINF_SUCCESS;
if (!pBp->fEnabled)
- rc = dbgfR3BpInt3Arm(pVM, pBp);
+ rc = dbgfR3BpInt3Arm(pUVM, pBp);
if (RT_SUCCESS(rc))
{
rc = VINF_DBGF_BP_ALREADY_EXIST;
@@ -357,7 +327,7 @@ static DECLCALLBACK(int) dbgfR3BpSetInt3(PVM pVM, PCDBGFADDRESS pAddress, uint64
/*
* Now ask REM to set the breakpoint.
*/
- int rc = dbgfR3BpInt3Arm(pVM, pBp);
+ int rc = dbgfR3BpInt3Arm(pUVM, pBp);
if (RT_SUCCESS(rc))
{
if (piBp)
@@ -371,30 +341,56 @@ static DECLCALLBACK(int) dbgfR3BpSetInt3(PVM pVM, PCDBGFADDRESS pAddress, uint64
/**
+ * Sets a breakpoint (int 3 based).
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address of the breakpoint.
+ * @param iHitTrigger The hit count at which the breakpoint start triggering.
+ * Use 0 (or 1) if it's gonna trigger at once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t)0 if it's never gonna be disabled.
+ * @param piBp Where to store the breakpoint id. (optional)
+ * @thread Any thread.
+ */
+VMMR3DECL(int) DBGFR3BpSet(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable, uint32_t *piBp)
+{
+ /*
+ * This must be done on EMT.
+ */
+ /** @todo SMP? */
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetInt3, 5,
+ pUVM, pAddress, &iHitTrigger, &iHitDisable, piBp);
+ LogFlow(("DBGFR3BpSet: returns %Rrc\n", rc));
+ return rc;
+}
+
+
+/**
* Arms an int 3 breakpoint.
* This is used to implement both DBGFR3BpSetReg() and DBGFR3BpEnable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pBp The breakpoint.
*/
-static int dbgfR3BpInt3Arm(PVM pVM, PDBGFBP pBp)
+static int dbgfR3BpInt3Arm(PUVM pUVM, PDBGFBP pBp)
{
/** @todo should actually use physical address here! */
- /* @todo SMP support! */
+ /** @todo SMP support! */
VMCPUID idCpu = 0;
/*
* Save current byte and write int3 instruction.
*/
DBGFADDRESS Addr;
- DBGFR3AddrFromFlat(pVM, &Addr, pBp->GCPtr);
- int rc = DBGFR3MemRead(pVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
+ DBGFR3AddrFromFlat(pUVM, &Addr, pBp->GCPtr);
+ int rc = DBGFR3MemRead(pUVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
if (RT_SUCCESS(rc))
{
static const uint8_t s_bInt3 = 0xcc;
- rc = DBGFR3MemWrite(pVM, idCpu, &Addr, &s_bInt3, 1);
+ rc = DBGFR3MemWrite(pUVM, idCpu, &Addr, &s_bInt3, 1);
}
return rc;
}
@@ -405,10 +401,10 @@ static int dbgfR3BpInt3Arm(PVM pVM, PDBGFBP pBp)
* This is used to implement both DBGFR3BpClear() and DBGFR3BpDisable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pBp The breakpoint.
*/
-static int dbgfR3BpInt3Disarm(PVM pVM, PDBGFBP pBp)
+static int dbgfR3BpInt3Disarm(PUVM pUVM, PDBGFBP pBp)
{
/* @todo SMP support! */
VMCPUID idCpu = 0;
@@ -418,11 +414,11 @@ static int dbgfR3BpInt3Disarm(PVM pVM, PDBGFBP pBp)
* We currently ignore invalid bytes.
*/
DBGFADDRESS Addr;
- DBGFR3AddrFromFlat(pVM, &Addr, pBp->GCPtr);
+ DBGFR3AddrFromFlat(pUVM, &Addr, pBp->GCPtr);
uint8_t bCurrent;
- int rc = DBGFR3MemRead(pVM, idCpu, &Addr, &bCurrent, 1);
+ int rc = DBGFR3MemRead(pUVM, idCpu, &Addr, &bCurrent, 1);
if (bCurrent == 0xcc)
- rc = DBGFR3MemWrite(pVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
+ rc = DBGFR3MemWrite(pUVM, idCpu, &Addr, &pBp->u.Int3.bOrg, 1);
return rc;
}
@@ -431,37 +427,7 @@ static int dbgfR3BpInt3Disarm(PVM pVM, PDBGFBP pBp)
* Sets a register breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pAddress The address of the breakpoint.
- * @param iHitTrigger The hit count at which the breakpoint start triggering.
- * Use 0 (or 1) if it's gonna trigger at once.
- * @param iHitDisable The hit count which disables the breakpoint.
- * Use ~(uint64_t) if it's never gonna be disabled.
- * @param fType The access type (one of the X86_DR7_RW_* defines).
- * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
- * Must be 1 if fType is X86_DR7_RW_EO.
- * @param piBp Where to store the breakpoint id. (optional)
- * @thread Any thread.
- */
-VMMR3DECL(int) DBGFR3BpSetReg(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable,
- uint8_t fType, uint8_t cb, uint32_t *piBp)
-{
- /** @todo SMP - broadcast, VT-x/AMD-V. */
- /*
- * This must be done on EMT.
- */
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetReg, 7, pVM, pAddress, &iHitTrigger, &iHitDisable, fType, cb, piBp);
- LogFlow(("DBGFR3BpSetReg: returns %Rrc\n", rc));
- return rc;
-
-}
-
-
-/**
- * Sets a register breakpoint.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pAddress The address of the breakpoint.
* @param piHitTrigger The hit count at which the breakpoint start triggering.
* Use 0 (or 1) if it's gonna trigger at once.
@@ -474,13 +440,15 @@ VMMR3DECL(int) DBGFR3BpSetReg(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrig
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpSetReg(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable,
+static DECLCALLBACK(int) dbgfR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable,
uint8_t fType, uint8_t cb, uint32_t *piBp)
{
/*
* Validate input.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_PARAMETER;
if (*piHitTrigger > *piHitDisable)
return VERR_INVALID_PARAMETER;
@@ -564,62 +532,70 @@ static DECLCALLBACK(int) dbgfR3BpSetReg(PVM pVM, PCDBGFADDRESS pAddress, uint64_
/**
- * Arms a debug register breakpoint.
- * This is used to implement both DBGFR3BpSetReg() and DBGFR3BpEnable().
+ * Sets a register breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pBp The breakpoint.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address of the breakpoint.
+ * @param iHitTrigger The hit count at which the breakpoint start triggering.
+ * Use 0 (or 1) if it's gonna trigger at once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t) if it's never gonna be disabled.
+ * @param fType The access type (one of the X86_DR7_RW_* defines).
+ * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
+ * Must be 1 if fType is X86_DR7_RW_EO.
+ * @param piBp Where to store the breakpoint id. (optional)
+ * @thread Any thread.
*/
-static int dbgfR3BpRegArm(PVM pVM, PDBGFBP pBp)
+VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable,
+ uint8_t fType, uint8_t cb, uint32_t *piBp)
{
- /* @todo SMP support! */
- PVMCPU pVCpu = &pVM->aCpus[0];
+ /*
+ * This must be done on EMT.
+ */
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetReg, 7,
+ pUVM, pAddress, &iHitTrigger, &iHitDisable, fType, cb, piBp);
+ LogFlow(("DBGFR3BpSetReg: returns %Rrc\n", rc));
+ return rc;
- Assert(pBp->fEnabled);
- return CPUMRecalcHyperDRx(pVCpu);
+}
+
+
+/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
+DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ NOREF(pVM); NOREF(pvUser);
+ return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX, false);
}
/**
- * Disarms a debug register breakpoint.
- * This is used to implement both DBGFR3BpClear() and DBGFR3BpDisable().
+ * Arms a debug register breakpoint.
+ * This is used to implement both DBGFR3BpSetReg() and DBGFR3BpEnable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pBp The breakpoint.
*/
-static int dbgfR3BpRegDisarm(PVM pVM, PDBGFBP pBp)
+static int dbgfR3BpRegArm(PVM pVM, PDBGFBP pBp)
{
- /** @todo SMP support! */
- PVMCPU pVCpu = &pVM->aCpus[0];
-
- Assert(!pBp->fEnabled);
- return CPUMRecalcHyperDRx(pVCpu);
+ Assert(pBp->fEnabled);
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
}
/**
- * Sets a recompiler breakpoint.
+ * Disarms a debug register breakpoint.
+ * This is used to implement both DBGFR3BpClear() and DBGFR3BpDisable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pAddress The address of the breakpoint.
- * @param iHitTrigger The hit count at which the breakpoint start triggering.
- * Use 0 (or 1) if it's gonna trigger at once.
- * @param iHitDisable The hit count which disables the breakpoint.
- * Use ~(uint64_t) if it's never gonna be disabled.
- * @param piBp Where to store the breakpoint id. (optional)
- * @thread Any thread.
+ * @param pUVM The user mode VM handle.
+ * @param pBp The breakpoint.
*/
-VMMR3DECL(int) DBGFR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable, uint32_t *piBp)
+static int dbgfR3BpRegDisarm(PVM pVM, PDBGFBP pBp)
{
- /*
- * This must be done on EMT.
- */
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetREM, 5, pVM, pAddress, &iHitTrigger, &iHitDisable, piBp);
- LogFlow(("DBGFR3BpSetREM: returns %Rrc\n", rc));
- return rc;
+ Assert(!pBp->fEnabled);
+ return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
}
@@ -627,7 +603,7 @@ VMMR3DECL(int) DBGFR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrig
* EMT worker for DBGFR3BpSetREM().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pAddress The address of the breakpoint.
* @param piHitTrigger The hit count at which the breakpoint start triggering.
* Use 0 (or 1) if it's gonna trigger at once.
@@ -637,12 +613,15 @@ VMMR3DECL(int) DBGFR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_t iHitTrig
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger, uint64_t *piHitDisable, uint32_t *piBp)
+static DECLCALLBACK(int) dbgfR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t *piHitTrigger,
+ uint64_t *piHitDisable, uint32_t *piBp)
{
/*
* Validate input.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_PARAMETER;
if (*piHitTrigger > *piHitDisable)
return VERR_INVALID_PARAMETER;
@@ -705,20 +684,26 @@ static DECLCALLBACK(int) dbgfR3BpSetREM(PVM pVM, PCDBGFADDRESS pAddress, uint64_
/**
- * Clears a breakpoint.
+ * Sets a recompiler breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param iBp The id of the breakpoint which should be removed (cleared).
+ * @param pUVM The user mode VM handle.
+ * @param pAddress The address of the breakpoint.
+ * @param iHitTrigger The hit count at which the breakpoint start triggering.
+ * Use 0 (or 1) if it's gonna trigger at once.
+ * @param iHitDisable The hit count which disables the breakpoint.
+ * Use ~(uint64_t) if it's never gonna be disabled.
+ * @param piBp Where to store the breakpoint id. (optional)
* @thread Any thread.
*/
-VMMR3DECL(int) DBGFR3BpClear(PVM pVM, uint32_t iBp)
+VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable, uint32_t *piBp)
{
/*
* This must be done on EMT.
*/
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpClear, 2, pVM, iBp);
- LogFlow(("DBGFR3BpClear: returns %Rrc\n", rc));
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpSetREM, 5,
+ pUVM, pAddress, &iHitTrigger, &iHitDisable, piBp);
+ LogFlow(("DBGFR3BpSetREM: returns %Rrc\n", rc));
return rc;
}
@@ -727,16 +712,18 @@ VMMR3DECL(int) DBGFR3BpClear(PVM pVM, uint32_t iBp)
* EMT worker for DBGFR3BpClear().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param iBp The id of the breakpoint which should be removed (cleared).
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpClear(PVM pVM, uint32_t iBp)
+static DECLCALLBACK(int) dbgfR3BpClear(PUVM pUVM, uint32_t iBp)
{
/*
* Validate input.
*/
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
PDBGFBP pBp = dbgfR3BpGet(pVM, iBp);
if (!pBp)
return VERR_DBGF_BP_NOT_FOUND;
@@ -755,7 +742,7 @@ static DECLCALLBACK(int) dbgfR3BpClear(PVM pVM, uint32_t iBp)
break;
case DBGFBPTYPE_INT3:
- rc = dbgfR3BpInt3Disarm(pVM, pBp);
+ rc = dbgfR3BpInt3Disarm(pUVM, pBp);
break;
case DBGFBPTYPE_REM:
@@ -781,20 +768,20 @@ static DECLCALLBACK(int) dbgfR3BpClear(PVM pVM, uint32_t iBp)
/**
- * Enables a breakpoint.
+ * Clears a breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param iBp The id of the breakpoint which should be enabled.
+ * @param pUVM The user mode VM handle.
+ * @param iBp The id of the breakpoint which should be removed (cleared).
* @thread Any thread.
*/
-VMMR3DECL(int) DBGFR3BpEnable(PVM pVM, uint32_t iBp)
+VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, uint32_t iBp)
{
/*
* This must be done on EMT.
*/
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpEnable, 2, pVM, iBp);
- LogFlow(("DBGFR3BpEnable: returns %Rrc\n", rc));
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpClear, 2, pUVM, iBp);
+ LogFlow(("DBGFR3BpClear: returns %Rrc\n", rc));
return rc;
}
@@ -803,16 +790,18 @@ VMMR3DECL(int) DBGFR3BpEnable(PVM pVM, uint32_t iBp)
* EMT worker for DBGFR3BpEnable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param iBp The id of the breakpoint which should be enabled.
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpEnable(PVM pVM, uint32_t iBp)
+static DECLCALLBACK(int) dbgfR3BpEnable(PUVM pUVM, uint32_t iBp)
{
/*
* Validate input.
*/
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
PDBGFBP pBp = dbgfR3BpGet(pVM, iBp);
if (!pBp)
return VERR_DBGF_BP_NOT_FOUND;
@@ -835,7 +824,7 @@ static DECLCALLBACK(int) dbgfR3BpEnable(PVM pVM, uint32_t iBp)
break;
case DBGFBPTYPE_INT3:
- rc = dbgfR3BpInt3Arm(pVM, pBp);
+ rc = dbgfR3BpInt3Arm(pUVM, pBp);
break;
case DBGFBPTYPE_REM:
@@ -857,20 +846,20 @@ static DECLCALLBACK(int) dbgfR3BpEnable(PVM pVM, uint32_t iBp)
/**
- * Disables a breakpoint.
+ * Enables a breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param iBp The id of the breakpoint which should be disabled.
+ * @param pUVM The user mode VM handle.
+ * @param iBp The id of the breakpoint which should be enabled.
* @thread Any thread.
*/
-VMMR3DECL(int) DBGFR3BpDisable(PVM pVM, uint32_t iBp)
+VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, uint32_t iBp)
{
/*
* This must be done on EMT.
*/
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpDisable, 2, pVM, iBp);
- LogFlow(("DBGFR3BpDisable: returns %Rrc\n", rc));
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpEnable, 2, pUVM, iBp);
+ LogFlow(("DBGFR3BpEnable: returns %Rrc\n", rc));
return rc;
}
@@ -879,16 +868,18 @@ VMMR3DECL(int) DBGFR3BpDisable(PVM pVM, uint32_t iBp)
* EMT worker for DBGFR3BpDisable().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param iBp The id of the breakpoint which should be disabled.
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpDisable(PVM pVM, uint32_t iBp)
+static DECLCALLBACK(int) dbgfR3BpDisable(PUVM pUVM, uint32_t iBp)
{
/*
* Validate input.
*/
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
PDBGFBP pBp = dbgfR3BpGet(pVM, iBp);
if (!pBp)
return VERR_DBGF_BP_NOT_FOUND;
@@ -911,7 +902,7 @@ static DECLCALLBACK(int) dbgfR3BpDisable(PVM pVM, uint32_t iBp)
break;
case DBGFBPTYPE_INT3:
- rc = dbgfR3BpInt3Disarm(pVM, pBp);
+ rc = dbgfR3BpInt3Disarm(pUVM, pBp);
break;
case DBGFBPTYPE_REM:
@@ -931,21 +922,20 @@ static DECLCALLBACK(int) dbgfR3BpDisable(PVM pVM, uint32_t iBp)
/**
- * Enumerate the breakpoints.
+ * Disables a breakpoint.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pfnCallback The callback function.
- * @param pvUser The user argument to pass to the callback.
- * @thread Any thread but the callback will be called from EMT.
+ * @param pUVM The user mode VM handle.
+ * @param iBp The id of the breakpoint which should be disabled.
+ * @thread Any thread.
*/
-VMMR3DECL(int) DBGFR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
+VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, uint32_t iBp)
{
/*
* This must be done on EMT.
*/
- int rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3BpEnum, 3, pVM, pfnCallback, pvUser);
- LogFlow(("DBGFR3BpClear: returns %Rrc\n", rc));
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpDisable, 2, pUVM, iBp);
+ LogFlow(("DBGFR3BpDisable: returns %Rrc\n", rc));
return rc;
}
@@ -954,18 +944,20 @@ VMMR3DECL(int) DBGFR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
* EMT worker for DBGFR3BpEnum().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pfnCallback The callback function.
* @param pvUser The user argument to pass to the callback.
* @thread EMT
* @internal
*/
-static DECLCALLBACK(int) dbgfR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
+static DECLCALLBACK(int) dbgfR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
{
/*
* Validate input.
*/
- AssertMsgReturn(VALID_PTR(pfnCallback), ("pfnCallback=%p\n", pfnCallback), VERR_INVALID_POINTER);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
/*
* Enumerate the hardware breakpoints.
@@ -974,7 +966,7 @@ static DECLCALLBACK(int) dbgfR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *
for (i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
if (pVM->dbgf.s.aHwBreakpoints[i].enmType != DBGFBPTYPE_FREE)
{
- int rc = pfnCallback(pVM, pvUser, &pVM->dbgf.s.aHwBreakpoints[i]);
+ int rc = pfnCallback(pUVM, pvUser, &pVM->dbgf.s.aHwBreakpoints[i]);
if (RT_FAILURE(rc))
return rc;
}
@@ -985,7 +977,7 @@ static DECLCALLBACK(int) dbgfR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *
for (i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); i++)
if (pVM->dbgf.s.aBreakpoints[i].enmType != DBGFBPTYPE_FREE)
{
- int rc = pfnCallback(pVM, pvUser, &pVM->dbgf.s.aBreakpoints[i]);
+ int rc = pfnCallback(pUVM, pvUser, &pVM->dbgf.s.aBreakpoints[i]);
if (RT_FAILURE(rc))
return rc;
}
@@ -993,3 +985,23 @@ static DECLCALLBACK(int) dbgfR3BpEnum(PVM pVM, PFNDBGFBPENUM pfnCallback, void *
return VINF_SUCCESS;
}
+
+/**
+ * Enumerate the breakpoints.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pfnCallback The callback function.
+ * @param pvUser The user argument to pass to the callback.
+ * @thread Any thread but the callback will be called from EMT.
+ */
+VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
+{
+ /*
+ * This must be done on EMT.
+ */
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3BpEnum, 3, pUVM, pfnCallback, pvUser);
+ LogFlow(("DBGFR3BpClear: returns %Rrc\n", rc));
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp b/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
index b4ca067f..21a4e92a 100644
--- a/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2010 Oracle Corporation
+ * Copyright (C) 2010-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -54,14 +54,15 @@
#include "DBGFInternal.h"
#include <VBox/vmm/cpum.h>
-#include "CPUMInternal.h"
+#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgfcorefmt.h>
+#include <VBox/vmm/mm.h>
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/uvm.h>
+
#include <VBox/err.h>
#include <VBox/log.h>
-#include <VBox/vmm/mm.h>
#include <VBox/version.h>
#include "../../Runtime/include/internal/ldrELF64.h"
@@ -426,10 +427,12 @@ static int dbgfR3CoreWriteWorker(PVM pVM, RTFILE hFile)
/*
* Write the CPU context note headers and data.
*/
+ /** @todo r=ramshankar: Dump a more standardized CPU structure rather than
+ * dumping CPUMCTX and bump the core file version number. */
Assert(RTFileTell(hFile) == offCpuDumps);
for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
{
- PCPUMCTX pCpuCtx = &pVM->aCpus[iCpu].cpum.s.Guest;
+ PCPUMCTX pCpuCtx = CPUMQueryGuestCtxPtr(&pVM->aCpus[iCpu]);
rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, s_pcszCoreVBoxCpu, pCpuCtx, sizeof(CPUMCTX));
if (RT_FAILURE(rc))
{
@@ -533,16 +536,19 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3CoreWriteRendezvous(PVM pVM, PVMCPU pVCp
* Write core dump of the guest.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszFilename The name of the file to which the guest core
* dump should be written.
* @param fReplaceFile Whether to replace the file or not.
*
- * @remarks The VM should be suspended before calling this function or DMA may
- * interfer with the state.
+ * @remarks The VM may need to be suspended before calling this function in
+ * order to truly stop all device threads and drivers. This function
+ * only synchronizes EMTs.
*/
-VMMR3DECL(int) DBGFR3CoreWrite(PVM pVM, const char *pszFilename, bool fReplaceFile)
+VMMR3DECL(int) DBGFR3CoreWrite(PUVM pUVM, const char *pszFilename, bool fReplaceFile)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pszFilename, VERR_INVALID_HANDLE);
diff --git a/src/VBox/VMM/VMMR3/DBGFCpu.cpp b/src/VBox/VMM/VMMR3/DBGFCpu.cpp
index 0aff3100..afb58430 100644
--- a/src/VBox/VMM/VMMR3/DBGFCpu.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFCpu.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2009 Oracle Corporation
+ * Copyright (C) 2009-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -24,6 +24,7 @@
#include <VBox/vmm/cpum.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/param.h>
@@ -34,9 +35,9 @@
* Wrapper around CPUMGetGuestMode.
*
* @returns VINF_SUCCESS.
- * @param pVM Pointer to the VM.
- * @param idCpu The current CPU ID.
- * @param penmMode Where to return the mode.
+ * @param pVM Pointer to the VM.
+ * @param idCpu The current CPU ID.
+ * @param penmMode Where to return the mode.
*/
static DECLCALLBACK(int) dbgfR3CpuGetMode(PVM pVM, VMCPUID idCpu, CPUMMODE *penmMode)
{
@@ -51,18 +52,70 @@ static DECLCALLBACK(int) dbgfR3CpuGetMode(PVM pVM, VMCPUID idCpu, CPUMMODE *penm
* Get the current CPU mode.
*
* @returns The CPU mode on success, CPUMMODE_INVALID on failure.
- * @param pVM Pointer to the VM.
- * @param idCpu The target CPU ID.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
*/
-VMMR3DECL(CPUMMODE) DBGFR3CpuGetMode(PVM pVM, VMCPUID idCpu)
+VMMR3DECL(CPUMMODE) DBGFR3CpuGetMode(PUVM pUVM, VMCPUID idCpu)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, CPUMMODE_INVALID);
- AssertReturn(idCpu < pVM->cCpus, CPUMMODE_INVALID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, CPUMMODE_INVALID);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, CPUMMODE_INVALID);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, CPUMMODE_INVALID);
CPUMMODE enmMode;
- int rc = VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3CpuGetMode, 3, pVM, idCpu, &enmMode);
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuGetMode, 3, pUVM->pVM, idCpu, &enmMode);
if (RT_FAILURE(rc))
return CPUMMODE_INVALID;
return enmMode;
}
+
+/**
+ * Wrapper around CPUMIsGuestIn64BitCode.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pVM Pointer to the VM.
+ * @param idCpu The current CPU ID.
+ * @param pfIn64BitCode Where to return the result.
+ */
+static DECLCALLBACK(int) dbgfR3CpuIn64BitCode(PVM pVM, VMCPUID idCpu, bool *pfIn64BitCode)
+{
+ Assert(idCpu == VMMGetCpuId(pVM));
+ PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ *pfIn64BitCode = CPUMIsGuestIn64BitCode(pVCpu);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the given CPU is executing 64-bit code or not.
+ *
+ * @returns true / false accordingly.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The target CPU ID.
+ */
+VMMR3DECL(bool) DBGFR3CpuIsIn64BitCode(PUVM pUVM, VMCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, false);
+ AssertReturn(idCpu < pUVM->pVM->cCpus, false);
+
+ bool fIn64BitCode;
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuIn64BitCode, 3, pUVM->pVM, idCpu, &fIn64BitCode);
+ if (RT_FAILURE(rc))
+ return false;
+ return fIn64BitCode;
+}
+
+
+/**
+ * Get the number of CPUs (or threads if you insist).
+ *
+ * @returns The number of CPUs
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMCPUID) DBGFR3CpuGetCount(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, 1);
+ return pUVM->cCpus;
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFDisas.cpp b/src/VBox/VMM/VMMR3/DBGFDisas.cpp
index e4ac0476..319963d5 100644
--- a/src/VBox/VMM/VMMR3/DBGFDisas.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFDisas.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,13 +22,18 @@
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
+#ifdef VBOX_WITH_RAW_MODE
+# include <VBox/vmm/patm.h>
+#endif
#include "DBGFInternal.h"
#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include "internal/pgm.h"
#include <VBox/log.h>
@@ -54,7 +59,7 @@ typedef struct
/** Pointer to the VMCPU. */
PVMCPU pVCpu;
/** The address space for resolving symbol. */
- RTDBGAS hAs;
+ RTDBGAS hDbgAs;
/** Pointer to the first byte in the segment. */
RTGCUINTPTR GCPtrSegBase;
/** Pointer to the byte after the end of the segment. (might have wrapped!) */
@@ -75,6 +80,10 @@ typedef struct
bool fLocked;
/** 64 bits mode or not. */
bool f64Bits;
+ /** Read original unpatched bytes from the patch manager. */
+ bool fUnpatchedBytes;
+ /** Set when fUnpatchedBytes is active and we encounter patched bytes. */
+ bool fPatchedInstr;
} DBGFDISASSTATE, *PDBGFDISASSTATE;
@@ -106,13 +115,17 @@ static int dbgfR3DisasInstrFirst(PVM pVM, PVMCPU pVCpu, PDBGFSELINFO pSelInfo, P
pState->enmMode = enmMode;
pState->GCPtrPage = 0;
pState->pvPageR3 = NULL;
- pState->hAs = pSelInfo->fFlags & DBGFSELINFO_FLAGS_HYPER /** @todo Deal more explicitly with RC in DBGFR3Disas*. */
+ pState->hDbgAs = !HMIsEnabled(pVM)
? DBGF_AS_RC_AND_GC_GLOBAL
: DBGF_AS_GLOBAL;
pState->pVM = pVM;
pState->pVCpu = pVCpu;
pState->fLocked = false;
pState->f64Bits = enmMode >= PGMMODE_AMD64 && pSelInfo->u.Raw.Gen.u1Long;
+#ifdef VBOX_WITH_RAW_MODE
+ pState->fUnpatchedBytes = RT_BOOL(fFlags & DBGF_DISAS_FLAGS_UNPATCHED_BYTES);
+ pState->fPatchedInstr = false;
+#endif
DISCPUMODE enmCpuMode;
switch (fFlags & DBGF_DISAS_FLAGS_MODE_MASK)
@@ -220,7 +233,8 @@ static DECLCALLBACK(int) dbgfR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInst
/* translate the address */
pState->GCPtrPage = GCPtr & PAGE_BASE_GC_MASK;
- if (MMHyperIsInsideArea(pState->pVM, pState->GCPtrPage))
+ if ( !HMIsEnabled(pState->pVM)
+ && MMHyperIsInsideArea(pState->pVM, pState->GCPtrPage))
{
pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->GCPtrPage);
if (!pState->pvPageR3)
@@ -263,6 +277,34 @@ static DECLCALLBACK(int) dbgfR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInst
if (cb > cbMaxRead)
cb = cbMaxRead;
+#ifdef VBOX_WITH_RAW_MODE
+ /*
+ * Read original bytes from PATM if asked to do so.
+ */
+ if (pState->fUnpatchedBytes)
+ {
+ size_t cbRead = cb;
+ int rc = PATMR3ReadOrgInstr(pState->pVM, GCPtr, &pDis->abInstr[offInstr], cbRead, &cbRead);
+ if (RT_SUCCESS(rc))
+ {
+ pState->fPatchedInstr = true;
+ if (cbRead >= cbMinRead)
+ {
+ pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
+ return rc;
+ }
+
+ cbMinRead -= (uint8_t)cbRead;
+ cbMaxRead -= (uint8_t)cbRead;
+ cb -= (uint8_t)cbRead;
+ offInstr += (uint8_t)cbRead;
+ GCPtr += cbRead;
+ if (!cb)
+ continue;
+ }
+ }
+#endif /* VBOX_WITH_RAW_MODE */
+
/*
* Read and advance,
*/
@@ -282,36 +324,73 @@ static DECLCALLBACK(int) dbgfR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInst
/**
* @copydoc FNDISGETSYMBOL
*/
-static DECLCALLBACK(int) dbgfR3DisasGetSymbol(PCDISCPUSTATE pCpu, uint32_t u32Sel, RTUINTPTR uAddress, char *pszBuf, size_t cchBuf, RTINTPTR *poff, void *pvUser)
+static DECLCALLBACK(int) dbgfR3DisasGetSymbol(PCDISCPUSTATE pCpu, uint32_t u32Sel, RTUINTPTR uAddress,
+ char *pszBuf, size_t cchBuf, RTINTPTR *poff, void *pvUser)
{
PDBGFDISASSTATE pState = (PDBGFDISASSTATE)pCpu;
PCDBGFSELINFO pSelInfo = (PCDBGFSELINFO)pvUser;
+
+ /*
+ * Address conversion
+ */
DBGFADDRESS Addr;
- RTDBGSYMBOL Sym;
- RTGCINTPTR off;
int rc;
-
+ /* Start with CS. */
if ( DIS_FMT_SEL_IS_REG(u32Sel)
? DIS_FMT_SEL_GET_REG(u32Sel) == DISSELREG_CS
: pSelInfo->Sel == DIS_FMT_SEL_GET_VALUE(u32Sel))
+ rc = DBGFR3AddrFromSelInfoOff(pState->pVM->pUVM, &Addr, pSelInfo, uAddress);
+ /* In long mode everything but FS and GS is easy. */
+ else if ( pState->Cpu.uCpuMode == DISCPUMODE_64BIT
+ && DIS_FMT_SEL_IS_REG(u32Sel)
+ && DIS_FMT_SEL_GET_REG(u32Sel) != DISSELREG_GS
+ && DIS_FMT_SEL_GET_REG(u32Sel) != DISSELREG_FS)
{
- rc = DBGFR3AddrFromSelInfoOff(pState->pVM, &Addr, pSelInfo, uAddress);
- if (RT_SUCCESS(rc))
- rc = DBGFR3AsSymbolByAddr(pState->pVM, pState->hAs, &Addr, &off, &Sym, NULL /*phMod*/);
+ DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
+ rc = VINF_SUCCESS;
+ }
+ /* Here's a quick hack to catch patch manager SS relative access. */
+ else if ( DIS_FMT_SEL_IS_REG(u32Sel)
+ && DIS_FMT_SEL_GET_REG(u32Sel) == DISSELREG_SS
+ && pSelInfo->GCPtrBase == 0
+ && pSelInfo->cbLimit >= UINT32_MAX
+#ifdef VBOX_WITH_RAW_MODE
+ && PATMIsPatchGCAddr(pState->pVM, pState->Cpu.uInstrAddr)
+#endif
+ )
+ {
+ DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
+ rc = VINF_SUCCESS;
}
else
- rc = VERR_SYMBOL_NOT_FOUND; /** @todo implement this */
- if (RT_SUCCESS(rc))
{
- size_t cchName = strlen(Sym.szName);
- if (cchName >= cchBuf)
- cchName = cchBuf - 1;
- memcpy(pszBuf, Sym.szName, cchName);
- pszBuf[cchName] = '\0';
-
- *poff = off;
+ /** @todo implement a generic solution here. */
+ rc = VERR_SYMBOL_NOT_FOUND;
}
+ /*
+ * If we got an address, try resolve it into a symbol.
+ */
+ if (RT_SUCCESS(rc))
+ {
+ RTDBGSYMBOL Sym;
+ RTGCINTPTR off;
+ rc = DBGFR3AsSymbolByAddr(pState->pVM->pUVM, pState->hDbgAs, &Addr, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
+ &off, &Sym, NULL /*phMod*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Return the symbol and offset.
+ */
+ size_t cchName = strlen(Sym.szName);
+ if (cchName >= cchBuf)
+ cchName = cchBuf - 1;
+ memcpy(pszBuf, Sym.szName, cchName);
+ pszBuf[cchName] = '\0';
+
+ *poff = off;
+ }
+ }
return rc;
}
@@ -355,6 +434,7 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
}
else if (fFlags & DBGF_DISAS_FLAGS_CURRENT_HYPER)
{
+ fFlags |= DBGF_DISAS_FLAGS_HYPER;
pCtxCore = CPUMGetHyperCtxCore(pVCpu);
Sel = pCtxCore->cs.Sel;
GCPtr = pCtxCore->rip;
@@ -445,7 +525,7 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
SelInfo.u.Raw.Gen.u4Type = X86_SEL_TYPE_EO;
}
}
- else if ( !(fFlags & DBGF_DISAS_FLAGS_CURRENT_HYPER)
+ else if ( !(fFlags & DBGF_DISAS_FLAGS_HYPER)
&& ( (pCtxCore && pCtxCore->eflags.Bits.u1VM)
|| enmMode == PGMMODE_REAL
|| (fFlags & DBGF_DISAS_FLAGS_MODE_MASK) == DBGF_DISAS_FLAGS_16BIT_REAL_MODE
@@ -470,7 +550,10 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
}
else
{
- rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, &SelInfo);
+ if (!(fFlags & DBGF_DISAS_FLAGS_HYPER))
+ rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, &SelInfo);
+ else
+ rc = SELMR3GetShadowSelectorInfo(pVM, Sel, &SelInfo);
if (RT_FAILURE(rc))
{
RTStrPrintf(pszOutput, cbOutput, "Sel=%04x -> %Rrc\n", Sel, rc);
@@ -485,7 +568,10 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
rc = dbgfR3DisasInstrFirst(pVM, pVCpu, &SelInfo, enmMode, GCPtr, fFlags, &State);
if (RT_FAILURE(rc))
{
- RTStrPrintf(pszOutput, cbOutput, "Disas -> %Rrc\n", rc);
+ if (State.Cpu.cbCachedInstr)
+ RTStrPrintf(pszOutput, cbOutput, "Disas -> %Rrc; %.*Rhxs\n", rc, (size_t)State.Cpu.cbCachedInstr, State.Cpu.abInstr);
+ else
+ RTStrPrintf(pszOutput, cbOutput, "Disas -> %Rrc\n", rc);
return rc;
}
@@ -498,28 +584,39 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
fFlags & DBGF_DISAS_FLAGS_NO_SYMBOLS ? NULL : dbgfR3DisasGetSymbol,
&SelInfo);
+#ifdef VBOX_WITH_RAW_MODE
+ /*
+ * Patched instruction annotations.
+ */
+ char szPatchAnnotations[256];
+ szPatchAnnotations[0] = '\0';
+ if (fFlags & DBGF_DISAS_FLAGS_ANNOTATE_PATCHED)
+ PATMR3DbgAnnotatePatchedInstruction(pVM, GCPtr, State.Cpu.cbInstr, szPatchAnnotations, sizeof(szPatchAnnotations));
+#endif
+
/*
* Print it to the user specified buffer.
*/
+ size_t cch;
if (fFlags & DBGF_DISAS_FLAGS_NO_BYTES)
{
if (fFlags & DBGF_DISAS_FLAGS_NO_ADDRESS)
- RTStrPrintf(pszOutput, cbOutput, "%s", szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%s", szBuf);
else if (fRealModeAddress)
- RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %s", Sel, (unsigned)GCPtr, szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %s", Sel, (unsigned)GCPtr, szBuf);
else if (Sel == DBGF_SEL_FLAT)
{
if (enmMode >= PGMMODE_AMD64)
- RTStrPrintf(pszOutput, cbOutput, "%RGv %s", GCPtr, szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%RGv %s", GCPtr, szBuf);
else
- RTStrPrintf(pszOutput, cbOutput, "%08RX32 %s", (uint32_t)GCPtr, szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%08RX32 %s", (uint32_t)GCPtr, szBuf);
}
else
{
if (enmMode >= PGMMODE_AMD64)
- RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %s", Sel, GCPtr, szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %s", Sel, GCPtr, szBuf);
else
- RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %s", Sel, (uint32_t)GCPtr, szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %s", Sel, (uint32_t)GCPtr, szBuf);
}
}
else
@@ -527,42 +624,47 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
uint32_t cbInstr = State.Cpu.cbInstr;
uint8_t const *pabInstr = State.Cpu.abInstr;
if (fFlags & DBGF_DISAS_FLAGS_NO_ADDRESS)
- RTStrPrintf(pszOutput, cbOutput, "%.*Rhxs%*s %s",
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%.*Rhxs%*s %s",
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
else if (fRealModeAddress)
- RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %.*Rhxs%*s %s",
- Sel, (unsigned)GCPtr,
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%04x %.*Rhxs%*s %s",
+ Sel, (unsigned)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
else if (Sel == DBGF_SEL_FLAT)
{
if (enmMode >= PGMMODE_AMD64)
- RTStrPrintf(pszOutput, cbOutput, "%RGv %.*Rhxs%*s %s",
- GCPtr,
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%RGv %.*Rhxs%*s %s",
+ GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
else
- RTStrPrintf(pszOutput, cbOutput, "%08RX32 %.*Rhxs%*s %s",
- (uint32_t)GCPtr,
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%08RX32 %.*Rhxs%*s %s",
+ (uint32_t)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
}
else
{
if (enmMode >= PGMMODE_AMD64)
- RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %.*Rhxs%*s %s",
- Sel, GCPtr,
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%RGv %.*Rhxs%*s %s",
+ Sel, GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
else
- RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %.*Rhxs%*s %s",
- Sel, (uint32_t)GCPtr,
- cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
- szBuf);
+ cch = RTStrPrintf(pszOutput, cbOutput, "%04x:%08RX32 %.*Rhxs%*s %s",
+ Sel, (uint32_t)GCPtr,
+ cbInstr, pabInstr, cbInstr < 8 ? (8 - cbInstr) * 3 : 0, "",
+ szBuf);
}
}
+#ifdef VBOX_WITH_RAW_MODE
+ if (szPatchAnnotations[0] && cch + 1 < cbOutput)
+ RTStrPrintf(pszOutput + cch, cbOutput - cch, " ; %s", szPatchAnnotations);
+#endif
+
if (pcbInstr)
*pcbInstr = State.Cpu.cbInstr;
@@ -575,7 +677,7 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
* Disassembles the one instruction according to the specified flags and address.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of virtual CPU.
* @param Sel The code selector. This used to determine the 32/16 bit ness and
* calculation of the actual instruction address.
@@ -590,13 +692,15 @@ dbgfR3DisasInstrExOnVCpu(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PRTGCPTR pGCPtr, uint
* @remarks May have to switch to the EMT of the virtual CPU in order to do
* address conversion.
*/
-VMMR3DECL(int) DBGFR3DisasInstrEx(PVM pVM, VMCPUID idCpu, RTSEL Sel, RTGCPTR GCPtr, uint32_t fFlags,
+VMMR3DECL(int) DBGFR3DisasInstrEx(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, RTGCPTR GCPtr, uint32_t fFlags,
char *pszOutput, uint32_t cbOutput, uint32_t *pcbInstr)
{
AssertReturn(cbOutput > 0, VERR_INVALID_PARAMETER);
*pszOutput = '\0';
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
AssertReturn(!(fFlags & ~DBGF_DISAS_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
AssertReturn((fFlags & DBGF_DISAS_FLAGS_MODE_MASK) <= DBGF_DISAS_FLAGS_64BIT_MODE, VERR_INVALID_PARAMETER);
@@ -625,15 +729,19 @@ VMMR3DECL(int) DBGFR3DisasInstrEx(PVM pVM, VMCPUID idCpu, RTSEL Sel, RTGCPTR GCP
* @param pszOutput Output buffer. This will always be properly
* terminated if @a cbOutput is greater than zero.
* @param cbOutput Size of the output buffer.
+ * @thread EMT(pVCpu)
*/
-VMMR3DECL(int) DBGFR3DisasInstrCurrent(PVMCPU pVCpu, char *pszOutput, uint32_t cbOutput)
+VMMR3_INT_DECL(int) DBGFR3DisasInstrCurrent(PVMCPU pVCpu, char *pszOutput, uint32_t cbOutput)
{
AssertReturn(cbOutput > 0, VERR_INVALID_PARAMETER);
*pszOutput = '\0';
- AssertReturn(pVCpu, VERR_INVALID_CONTEXT);
- return DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
- DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
- pszOutput, cbOutput, NULL);
+ Assert(VMCPU_IS_EMT(pVCpu));
+
+ RTGCPTR GCPtr = 0;
+ return dbgfR3DisasInstrExOnVCpu(pVCpu->pVMR3, pVCpu, 0, &GCPtr,
+ DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE
+ | DBGF_DISAS_FLAGS_ANNOTATE_PATCHED,
+ pszOutput, cbOutput, NULL);
}
@@ -644,6 +752,7 @@ VMMR3DECL(int) DBGFR3DisasInstrCurrent(PVMCPU pVCpu, char *pszOutput, uint32_t c
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pszPrefix Short prefix string to the disassembly string. (optional)
+ * @thread EMT(pVCpu)
*/
VMMR3DECL(int) DBGFR3DisasInstrCurrentLogInternal(PVMCPU pVCpu, const char *pszPrefix)
{
@@ -653,7 +762,12 @@ VMMR3DECL(int) DBGFR3DisasInstrCurrentLogInternal(PVMCPU pVCpu, const char *pszP
if (RT_FAILURE(rc))
RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrCurrentLog failed with rc=%Rrc\n", rc);
if (pszPrefix && *pszPrefix)
- RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ {
+ if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+ RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ else
+ RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+ }
else
RTLogPrintf("%s\n", szBuf);
return rc;
@@ -666,22 +780,30 @@ VMMR3DECL(int) DBGFR3DisasInstrCurrentLogInternal(PVMCPU pVCpu, const char *pszP
* Addresses will be attempted resolved to symbols.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU, defaults to CPU 0 if NULL.
* @param Sel The code selector. This used to determine the 32/16 bit-ness and
* calculation of the actual instruction address.
* @param GCPtr The code address relative to the base of Sel.
* @param pszPrefix Short prefix string to the disassembly string. (optional)
+ * @thread EMT(pVCpu)
*/
VMMR3DECL(int) DBGFR3DisasInstrLogInternal(PVMCPU pVCpu, RTSEL Sel, RTGCPTR GCPtr, const char *pszPrefix)
{
+ Assert(VMCPU_IS_EMT(pVCpu));
+
char szBuf[256];
- int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, Sel, GCPtr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
- &szBuf[0], sizeof(szBuf), NULL);
+ RTGCPTR GCPtrTmp = GCPtr;
+ int rc = dbgfR3DisasInstrExOnVCpu(pVCpu->pVMR3, pVCpu, Sel, &GCPtrTmp, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ &szBuf[0], sizeof(szBuf), NULL);
if (RT_FAILURE(rc))
RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrLog(, %RTsel, %RGv) failed with rc=%Rrc\n", Sel, GCPtr, rc);
if (pszPrefix && *pszPrefix)
- RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ {
+ if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+ RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+ else
+ RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+ }
else
RTLogPrintf("%s\n", szBuf);
return rc;
diff --git a/src/VBox/VMM/VMMR3/DBGFInfo.cpp b/src/VBox/VMM/VMMR3/DBGFInfo.cpp
index d3ffe1e1..a1b0ca10 100644
--- a/src/VBox/VMM/VMMR3/DBGFInfo.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFInfo.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -25,11 +25,13 @@
#include <VBox/vmm/mm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/ctype.h>
+#include <iprt/param.h>
#include <iprt/semaphore.h>
#include <iprt/stream.h>
#include <iprt/string.h>
@@ -77,26 +79,26 @@ static const DBGFINFOHLP g_dbgfR3InfoStdErrHlp =
* Initialize the info handlers.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-int dbgfR3InfoInit(PVM pVM)
+int dbgfR3InfoInit(PUVM pUVM)
{
/*
* Make sure we already didn't initialized in the lazy manner.
*/
- if (RTCritSectIsInitialized(&pVM->dbgf.s.InfoCritSect))
+ if (RTCritSectIsInitialized(&pUVM->dbgf.s.InfoCritSect))
return VINF_SUCCESS;
/*
* Initialize the crit sect.
*/
- int rc = RTCritSectInit(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectInit(&pUVM->dbgf.s.InfoCritSect);
AssertRCReturn(rc, rc);
/*
* Register the 'info help' item.
*/
- rc = DBGFR3InfoRegisterInternal(pVM, "help", "List of info items.", dbgfR3InfoHelp);
+ rc = DBGFR3InfoRegisterInternal(pUVM->pVM, "help", "List of info items.", dbgfR3InfoHelp);
AssertRCReturn(rc, rc);
return VINF_SUCCESS;
@@ -107,14 +109,14 @@ int dbgfR3InfoInit(PVM pVM)
* Terminate the info handlers.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-int dbgfR3InfoTerm(PVM pVM)
+int dbgfR3InfoTerm(PUVM pUVM)
{
/*
* Delete the crit sect.
*/
- int rc = RTCritSectDelete(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectDelete(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
return rc;
}
@@ -210,13 +212,13 @@ VMMR3DECL(PCDBGFINFOHLP) DBGFR3InfoLogRelHlp(void)
* Upon successful return the we're inside the crit sect and the caller must leave it.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The identifier of the info.
* @param pszDesc The description of the info and any arguments the handler may take.
* @param fFlags The flags.
* @param ppInfo Where to store the created
*/
-static int dbgfR3InfoRegister(PVM pVM, const char *pszName, const char *pszDesc, uint32_t fFlags, PDBGFINFO *ppInfo)
+static int dbgfR3InfoRegister(PUVM pUVM, const char *pszName, const char *pszDesc, uint32_t fFlags, PDBGFINFO *ppInfo)
{
/*
* Validate.
@@ -231,7 +233,7 @@ static int dbgfR3InfoRegister(PVM pVM, const char *pszName, const char *pszDesc,
*/
int rc;
size_t cchName = strlen(pszName) + 1;
- PDBGFINFO pInfo = (PDBGFINFO)MMR3HeapAlloc(pVM, MM_TAG_DBGF_INFO, RT_OFFSETOF(DBGFINFO, szName[cchName]));
+ PDBGFINFO pInfo = (PDBGFINFO)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_INFO, RT_OFFSETOF(DBGFINFO, szName[cchName]));
if (pInfo)
{
pInfo->enmType = DBGFINFOTYPE_INVALID;
@@ -242,25 +244,25 @@ static int dbgfR3InfoRegister(PVM pVM, const char *pszName, const char *pszDesc,
/* lazy init */
rc = VINF_SUCCESS;
- if (!RTCritSectIsInitialized(&pVM->dbgf.s.InfoCritSect))
- rc = dbgfR3InfoInit(pVM);
+ if (!RTCritSectIsInitialized(&pUVM->dbgf.s.InfoCritSect))
+ rc = dbgfR3InfoInit(pUVM);
if (RT_SUCCESS(rc))
{
/*
* Insert in alphabetical order.
*/
- rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
PDBGFINFO pPrev = NULL;
PDBGFINFO pCur;
- for (pCur = pVM->dbgf.s.pInfoFirst; pCur; pPrev = pCur, pCur = pCur->pNext)
+ for (pCur = pUVM->dbgf.s.pInfoFirst; pCur; pPrev = pCur, pCur = pCur->pNext)
if (strcmp(pszName, pCur->szName) < 0)
break;
pInfo->pNext = pCur;
if (pPrev)
pPrev->pNext = pInfo;
else
- pVM->dbgf.s.pInfoFirst = pInfo;
+ pUVM->dbgf.s.pInfoFirst = pInfo;
*ppInfo = pInfo;
return VINF_SUCCESS;
@@ -283,7 +285,8 @@ static int dbgfR3InfoRegister(PVM pVM, const char *pszName, const char *pszDesc,
* @param pfnHandler The handler function to be called to display the info.
* @param pDevIns The device instance owning the info.
*/
-VMMR3DECL(int) DBGFR3InfoRegisterDevice(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDEV pfnHandler, PPDMDEVINS pDevIns)
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDevice(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLERDEV pfnHandler, PPDMDEVINS pDevIns)
{
LogFlow(("DBGFR3InfoRegisterDevice: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDevIns=%p\n",
pszName, pszName, pszDesc, pszDesc, pfnHandler, pDevIns));
@@ -291,28 +294,20 @@ VMMR3DECL(int) DBGFR3InfoRegisterDevice(PVM pVM, const char *pszName, const char
/*
* Validate the specific stuff.
*/
- if (!pfnHandler)
- {
- AssertMsgFailed(("No handler\n"));
- return VERR_INVALID_PARAMETER;
- }
- if (!pDevIns)
- {
- AssertMsgFailed(("No pDevIns\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
/*
* Register
*/
PDBGFINFO pInfo;
- int rc = dbgfR3InfoRegister(pVM, pszName, pszDesc, 0, &pInfo);
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
if (RT_SUCCESS(rc))
{
pInfo->enmType = DBGFINFOTYPE_DEV;
pInfo->u.Dev.pfnHandler = pfnHandler;
pInfo->u.Dev.pDevIns = pDevIns;
- RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ RTCritSectLeave(&pVM->pUVM->dbgf.s.InfoCritSect);
}
return rc;
@@ -329,7 +324,7 @@ VMMR3DECL(int) DBGFR3InfoRegisterDevice(PVM pVM, const char *pszName, const char
* @param pfnHandler The handler function to be called to display the info.
* @param pDrvIns The driver instance owning the info.
*/
-VMMR3DECL(int) DBGFR3InfoRegisterDriver(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDRV pfnHandler, PPDMDRVINS pDrvIns)
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterDriver(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERDRV pfnHandler, PPDMDRVINS pDrvIns)
{
LogFlow(("DBGFR3InfoRegisterDriver: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pDrvIns=%p\n",
pszName, pszName, pszDesc, pszDesc, pfnHandler, pDrvIns));
@@ -337,28 +332,20 @@ VMMR3DECL(int) DBGFR3InfoRegisterDriver(PVM pVM, const char *pszName, const char
/*
* Validate the specific stuff.
*/
- if (!pfnHandler)
- {
- AssertMsgFailed(("No handler\n"));
- return VERR_INVALID_PARAMETER;
- }
- if (!pDrvIns)
- {
- AssertMsgFailed(("No pDrvIns\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
/*
* Register
*/
PDBGFINFO pInfo;
- int rc = dbgfR3InfoRegister(pVM, pszName, pszDesc, 0, &pInfo);
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, 0, &pInfo);
if (RT_SUCCESS(rc))
{
pInfo->enmType = DBGFINFOTYPE_DRV;
pInfo->u.Drv.pfnHandler = pfnHandler;
pInfo->u.Drv.pDrvIns = pDrvIns;
- RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ RTCritSectLeave(&pVM->pUVM->dbgf.s.InfoCritSect);
}
return rc;
@@ -374,7 +361,7 @@ VMMR3DECL(int) DBGFR3InfoRegisterDriver(PVM pVM, const char *pszName, const char
* @param pszDesc The description of the info and any arguments the handler may take.
* @param pfnHandler The handler function to be called to display the info.
*/
-VMMR3DECL(int) DBGFR3InfoRegisterInternal(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERINT pfnHandler)
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterInternal(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERINT pfnHandler)
{
return DBGFR3InfoRegisterInternalEx(pVM, pszName, pszDesc, pfnHandler, 0);
}
@@ -390,7 +377,8 @@ VMMR3DECL(int) DBGFR3InfoRegisterInternal(PVM pVM, const char *pszName, const ch
* @param pfnHandler The handler function to be called to display the info.
* @param fFlags Flags, see the DBGFINFO_FLAGS_*.
*/
-VMMR3DECL(int) DBGFR3InfoRegisterInternalEx(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLERINT pfnHandler, uint32_t fFlags)
+VMMR3_INT_DECL(int) DBGFR3InfoRegisterInternalEx(PVM pVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLERINT pfnHandler, uint32_t fFlags)
{
LogFlow(("DBGFR3InfoRegisterInternal: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p fFlags=%x\n",
pszName, pszName, pszDesc, pszDesc, pfnHandler, fFlags));
@@ -398,22 +386,18 @@ VMMR3DECL(int) DBGFR3InfoRegisterInternalEx(PVM pVM, const char *pszName, const
/*
* Validate the specific stuff.
*/
- if (!pfnHandler)
- {
- AssertMsgFailed(("No handler\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
/*
* Register
*/
PDBGFINFO pInfo;
- int rc = dbgfR3InfoRegister(pVM, pszName, pszDesc, fFlags, &pInfo);
+ int rc = dbgfR3InfoRegister(pVM->pUVM, pszName, pszDesc, fFlags, &pInfo);
if (RT_SUCCESS(rc))
{
pInfo->enmType = DBGFINFOTYPE_INT;
pInfo->u.Int.pfnHandler = pfnHandler;
- RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ RTCritSectLeave(&pVM->pUVM->dbgf.s.InfoCritSect);
}
return rc;
@@ -424,13 +408,14 @@ VMMR3DECL(int) DBGFR3InfoRegisterInternalEx(PVM pVM, const char *pszName, const
* Register a info handler owned by an external component.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The identifier of the info.
* @param pszDesc The description of the info and any arguments the handler may take.
* @param pfnHandler The handler function to be called to display the info.
* @param pvUser User argument to be passed to the handler.
*/
-VMMR3DECL(int) DBGFR3InfoRegisterExternal(PVM pVM, const char *pszName, const char *pszDesc, PFNDBGFHANDLEREXT pfnHandler, void *pvUser)
+VMMR3DECL(int) DBGFR3InfoRegisterExternal(PUVM pUVM, const char *pszName, const char *pszDesc,
+ PFNDBGFHANDLEREXT pfnHandler, void *pvUser)
{
LogFlow(("DBGFR3InfoRegisterExternal: pszName=%p:{%s} pszDesc=%p:{%s} pfnHandler=%p pvUser=%p\n",
pszName, pszName, pszDesc, pszDesc, pfnHandler, pvUser));
@@ -438,23 +423,20 @@ VMMR3DECL(int) DBGFR3InfoRegisterExternal(PVM pVM, const char *pszName, const ch
/*
* Validate the specific stuff.
*/
- if (!pfnHandler)
- {
- AssertMsgFailed(("No handler\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
/*
* Register
*/
PDBGFINFO pInfo;
- int rc = dbgfR3InfoRegister(pVM, pszName, pszDesc, 0, &pInfo);
+ int rc = dbgfR3InfoRegister(pUVM, pszName, pszDesc, 0, &pInfo);
if (RT_SUCCESS(rc))
{
pInfo->enmType = DBGFINFOTYPE_EXT;
pInfo->u.Ext.pfnHandler = pfnHandler;
pInfo->u.Ext.pvUser = pvUser;
- RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
}
return rc;
@@ -469,28 +451,25 @@ VMMR3DECL(int) DBGFR3InfoRegisterExternal(PVM pVM, const char *pszName, const ch
* @param pDevIns Device instance.
* @param pszName The identifier of the info. If NULL all owned by the device.
*/
-VMMR3DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName)
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName)
{
LogFlow(("DBGFR3InfoDeregisterDevice: pDevIns=%p pszName=%p:{%s}\n", pDevIns, pszName, pszName));
/*
* Validate input.
*/
- if (!pDevIns)
- {
- AssertMsgFailed(("!pDevIns\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
size_t cchName = pszName ? strlen(pszName) : 0;
+ PUVM pUVM = pVM->pUVM;
/*
* Enumerate the info handlers and free the requested entries.
*/
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
- AssertRC(rc);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect); AssertRC(rc);
rc = VERR_FILE_NOT_FOUND;
PDBGFINFO pPrev = NULL;
- PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
if (pszName)
{
/*
@@ -505,7 +484,7 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const cha
if (pPrev)
pPrev->pNext = pInfo->pNext;
else
- pVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
MMR3HeapFree(pInfo);
rc = VINF_SUCCESS;
break;
@@ -514,7 +493,7 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const cha
else
{
/*
- * Free all owned by the driver.
+ * Free all owned by the device.
*/
for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
if ( pInfo->enmType == DBGFINFOTYPE_DEV
@@ -523,13 +502,13 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const cha
if (pPrev)
pPrev->pNext = pInfo->pNext;
else
- pVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
MMR3HeapFree(pInfo);
pInfo = pPrev;
}
rc = VINF_SUCCESS;
}
- int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ int rc2 = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc2);
AssertRC(rc);
LogFlow(("DBGFR3InfoDeregisterDevice: returns %Rrc\n", rc));
@@ -544,28 +523,25 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDevice(PVM pVM, PPDMDEVINS pDevIns, const cha
* @param pDrvIns Driver instance.
* @param pszName The identifier of the info. If NULL all owned by the driver.
*/
-VMMR3DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName)
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName)
{
LogFlow(("DBGFR3InfoDeregisterDriver: pDrvIns=%p pszName=%p:{%s}\n", pDrvIns, pszName, pszName));
/*
* Validate input.
*/
- if (!pDrvIns)
- {
- AssertMsgFailed(("!pDrvIns\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
size_t cchName = pszName ? strlen(pszName) : 0;
+ PUVM pUVM = pVM->pUVM;
/*
* Enumerate the info handlers and free the requested entries.
*/
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
- AssertRC(rc);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect); AssertRC(rc);
rc = VERR_FILE_NOT_FOUND;
PDBGFINFO pPrev = NULL;
- PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
if (pszName)
{
/*
@@ -580,7 +556,7 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const cha
if (pPrev)
pPrev->pNext = pInfo->pNext;
else
- pVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
MMR3HeapFree(pInfo);
rc = VINF_SUCCESS;
break;
@@ -598,13 +574,13 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const cha
if (pPrev)
pPrev->pNext = pInfo->pNext;
else
- pVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
MMR3HeapFree(pInfo);
pInfo = pPrev;
}
rc = VINF_SUCCESS;
}
- int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ int rc2 = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc2);
AssertRC(rc);
LogFlow(("DBGFR3InfoDeregisterDriver: returns %Rrc\n", rc));
@@ -616,30 +592,26 @@ VMMR3DECL(int) DBGFR3InfoDeregisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const cha
* Internal deregistration helper.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The identifier of the info.
* @param enmType The info owner type.
*/
-static int dbgfR3InfoDeregister(PVM pVM, const char *pszName, DBGFINFOTYPE enmType)
+static int dbgfR3InfoDeregister(PUVM pUVM, const char *pszName, DBGFINFOTYPE enmType)
{
/*
* Validate input.
*/
- if (!pszName)
- {
- AssertMsgFailed(("!pszName\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pszName, VERR_INVALID_POINTER);
/*
* Find the info handler.
*/
size_t cchName = strlen(pszName);
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
rc = VERR_FILE_NOT_FOUND;
PDBGFINFO pPrev = NULL;
- PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
for (; pInfo; pPrev = pInfo, pInfo = pInfo->pNext)
if ( pInfo->cchName == cchName
&& !strcmp(pInfo->szName, pszName)
@@ -648,12 +620,12 @@ static int dbgfR3InfoDeregister(PVM pVM, const char *pszName, DBGFINFOTYPE enmTy
if (pPrev)
pPrev->pNext = pInfo->pNext;
else
- pVM->dbgf.s.pInfoFirst = pInfo->pNext;
+ pUVM->dbgf.s.pInfoFirst = pInfo->pNext;
MMR3HeapFree(pInfo);
rc = VINF_SUCCESS;
break;
}
- int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ int rc2 = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc2);
AssertRC(rc);
LogFlow(("dbgfR3InfoDeregister: returns %Rrc\n", rc));
@@ -668,10 +640,10 @@ static int dbgfR3InfoDeregister(PVM pVM, const char *pszName, DBGFINFOTYPE enmTy
* @param pVM Pointer to the VM.
* @param pszName The identifier of the info. If NULL all owned by the device.
*/
-VMMR3DECL(int) DBGFR3InfoDeregisterInternal(PVM pVM, const char *pszName)
+VMMR3_INT_DECL(int) DBGFR3InfoDeregisterInternal(PVM pVM, const char *pszName)
{
LogFlow(("DBGFR3InfoDeregisterInternal: pszName=%p:{%s}\n", pszName, pszName));
- return dbgfR3InfoDeregister(pVM, pszName, DBGFINFOTYPE_INT);
+ return dbgfR3InfoDeregister(pVM->pUVM, pszName, DBGFINFOTYPE_INT);
}
@@ -679,13 +651,14 @@ VMMR3DECL(int) DBGFR3InfoDeregisterInternal(PVM pVM, const char *pszName)
* Deregister a info handler owned by an external component.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The identifier of the info. If NULL all owned by the device.
*/
-VMMR3DECL(int) DBGFR3InfoDeregisterExternal(PVM pVM, const char *pszName)
+VMMR3DECL(int) DBGFR3InfoDeregisterExternal(PUVM pUVM, const char *pszName)
{
LogFlow(("DBGFR3InfoDeregisterExternal: pszName=%p:{%s}\n", pszName, pszName));
- return dbgfR3InfoDeregister(pVM, pszName, DBGFINFOTYPE_EXT);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ return dbgfR3InfoDeregister(pUVM, pszName, DBGFINFOTYPE_EXT);
}
@@ -693,27 +666,25 @@ VMMR3DECL(int) DBGFR3InfoDeregisterExternal(PVM pVM, const char *pszName)
* Worker for DBGFR3Info and DBGFR3InfoEx.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param idCpu Which CPU to run EMT bound handlers on.
- * VMCPUID_ANY or a valid CPU ID.
- * @param pszName What to dump.
- * @param pszArgs Arguments, optional.
- * @param pHlp Output helper, optional.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu Which CPU to run EMT bound handlers on. VMCPUID_ANY or
+ * a valid CPU ID.
+ * @param pszName What to dump.
+ * @param pszArgs Arguments, optional.
+ * @param pHlp Output helper, optional.
*/
-static DECLCALLBACK(int) dbgfR3Info(PVM pVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+static DECLCALLBACK(int) dbgfR3Info(PUVM pUVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
{
/*
* Validate input.
*/
AssertPtrReturn(pszName, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pszArgs, VERR_INVALID_POINTER);
if (pHlp)
{
- if ( !pHlp->pfnPrintf
- || !pHlp->pfnPrintfV)
- {
- AssertMsgFailed(("A pHlp member is missing!\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pHlp, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pHlp->pfnPrintf, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pHlp->pfnPrintfV, VERR_INVALID_PARAMETER);
}
else
pHlp = &g_dbgfR3InfoLogHlp;
@@ -722,9 +693,9 @@ static DECLCALLBACK(int) dbgfR3Info(PVM pVM, VMCPUID idCpu, const char *pszName,
* Find the info handler.
*/
size_t cchName = strlen(pszName);
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
- PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst;
+ PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst;
for (; pInfo; pInfo = pInfo->pNext)
if ( pInfo->cchName == cchName
&& !memcmp(pInfo->szName, pszName, cchName))
@@ -737,35 +708,40 @@ static DECLCALLBACK(int) dbgfR3Info(PVM pVM, VMCPUID idCpu, const char *pszName,
* Switch on the type and invoke the handler.
*/
DBGFINFO Info = *pInfo;
- rc = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ rc = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
rc = VINF_SUCCESS;
switch (Info.enmType)
{
case DBGFINFOTYPE_DEV:
if (Info.fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, idCpu, (PFNRT)Info.u.Dev.pfnHandler, 3, Info.u.Dev.pDevIns, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, idCpu, (PFNRT)Info.u.Dev.pfnHandler, 3, Info.u.Dev.pDevIns, pHlp, pszArgs);
else
Info.u.Dev.pfnHandler(Info.u.Dev.pDevIns, pHlp, pszArgs);
break;
case DBGFINFOTYPE_DRV:
if (Info.fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, idCpu, (PFNRT)Info.u.Drv.pfnHandler, 3, Info.u.Drv.pDrvIns, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, idCpu, (PFNRT)Info.u.Drv.pfnHandler, 3, Info.u.Drv.pDrvIns, pHlp, pszArgs);
else
Info.u.Drv.pfnHandler(Info.u.Drv.pDrvIns, pHlp, pszArgs);
break;
case DBGFINFOTYPE_INT:
- if (Info.fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, idCpu, (PFNRT)Info.u.Int.pfnHandler, 3, pVM, pHlp, pszArgs);
+ if (RT_VALID_PTR(pUVM->pVM))
+ {
+ if (Info.fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
+ rc = VMR3ReqCallVoidWaitU(pUVM, idCpu, (PFNRT)Info.u.Int.pfnHandler, 3, pUVM->pVM, pHlp, pszArgs);
+ else
+ Info.u.Int.pfnHandler(pUVM->pVM, pHlp, pszArgs);
+ }
else
- Info.u.Int.pfnHandler(pVM, pHlp, pszArgs);
+ rc = VERR_INVALID_VM_HANDLE;
break;
case DBGFINFOTYPE_EXT:
if (Info.fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, idCpu, (PFNRT)Info.u.Ext.pfnHandler, 3, Info.u.Ext.pvUser, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, idCpu, (PFNRT)Info.u.Ext.pfnHandler, 3, Info.u.Ext.pvUser, pHlp, pszArgs);
else
Info.u.Ext.pfnHandler(Info.u.Ext.pvUser, pHlp, pszArgs);
break;
@@ -776,7 +752,7 @@ static DECLCALLBACK(int) dbgfR3Info(PVM pVM, VMCPUID idCpu, const char *pszName,
}
else
{
- rc = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ rc = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
rc = VERR_FILE_NOT_FOUND;
}
@@ -787,14 +763,15 @@ static DECLCALLBACK(int) dbgfR3Info(PVM pVM, VMCPUID idCpu, const char *pszName,
* Display a piece of info writing to the supplied handler.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName The identifier of the info to display.
* @param pszArgs Arguments to the info handler.
* @param pHlp The output helper functions. If NULL the logger will be used.
*/
-VMMR3DECL(int) DBGFR3Info(PVM pVM, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+VMMR3DECL(int) DBGFR3Info(PUVM pUVM, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
{
- return dbgfR3Info(pVM, VMCPUID_ANY, pszName, pszArgs, pHlp);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ return dbgfR3Info(pUVM, VMCPUID_ANY, pszName, pszArgs, pHlp);
}
@@ -802,19 +779,20 @@ VMMR3DECL(int) DBGFR3Info(PVM pVM, const char *pszName, const char *pszArgs, PCD
* Display a piece of info writing to the supplied handler.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The CPU to exectue the request on. Pass NIL_VMCPUID
* to not involve any EMT.
* @param pszName The identifier of the info to display.
* @param pszArgs Arguments to the info handler.
* @param pHlp The output helper functions. If NULL the logger will be used.
*/
-VMMR3DECL(int) DBGFR3InfoEx(PVM pVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
+VMMR3DECL(int) DBGFR3InfoEx(PUVM pUVM, VMCPUID idCpu, const char *pszName, const char *pszArgs, PCDBGFINFOHLP pHlp)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
if (idCpu == NIL_VMCPUID)
- return dbgfR3Info(pVM, VMCPUID_ANY, pszName, pszArgs, pHlp);
- return VMR3ReqPriorityCallWait(pVM, idCpu,
- (PFNRT)dbgfR3Info, 5, pVM, idCpu, pszName, pszArgs, pHlp);
+ return dbgfR3Info(pUVM, VMCPUID_ANY, pszName, pszArgs, pHlp);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu,
+ (PFNRT)dbgfR3Info, 5, pUVM, idCpu, pszName, pszArgs, pHlp);
}
@@ -822,13 +800,13 @@ VMMR3DECL(int) DBGFR3InfoEx(PVM pVM, VMCPUID idCpu, const char *pszName, const c
* Wrapper for DBGFR3Info that outputs to the release log.
*
* @returns See DBGFR3Info.
- * @param pVM Pointer to the VM.
- * @param pszName See DBGFR3Info.
- * @param pszArgs See DBGFR3Info.
+ * @param pUVM The user mode VM handle.
+ * @param pszName See DBGFR3Info.
+ * @param pszArgs See DBGFR3Info.
*/
-VMMR3DECL(int) DBGFR3InfoLogRel(PVM pVM, const char *pszName, const char *pszArgs)
+VMMR3DECL(int) DBGFR3InfoLogRel(PUVM pUVM, const char *pszName, const char *pszArgs)
{
- return DBGFR3Info(pVM, pszName, pszArgs, &g_dbgfR3InfoLogRelHlp);
+ return DBGFR3Info(pUVM, pszName, pszArgs, &g_dbgfR3InfoLogRelHlp);
}
@@ -836,13 +814,13 @@ VMMR3DECL(int) DBGFR3InfoLogRel(PVM pVM, const char *pszName, const char *pszArg
* Wrapper for DBGFR3Info that outputs to standard error.
*
* @returns See DBGFR3Info.
- * @param pVM Pointer to the VM.
- * @param pszName See DBGFR3Info.
- * @param pszArgs See DBGFR3Info.
+ * @param pUVM The user mode VM handle.
+ * @param pszName See DBGFR3Info.
+ * @param pszArgs See DBGFR3Info.
*/
-VMMR3DECL(int) DBGFR3InfoStdErr(PVM pVM, const char *pszName, const char *pszArgs)
+VMMR3DECL(int) DBGFR3InfoStdErr(PUVM pUVM, const char *pszName, const char *pszArgs)
{
- return DBGFR3Info(pVM, pszName, pszArgs, &g_dbgfR3InfoStdErrHlp);
+ return DBGFR3Info(pUVM, pszName, pszArgs, &g_dbgfR3InfoStdErrHlp);
}
@@ -862,12 +840,13 @@ VMMR3DECL(int) DBGFR3InfoStdErr(PVM pVM, const char *pszName, const char *pszArg
*
* @threads EMT
*/
-VMMR3DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *pszExcludePat, const char *pszSepFmt,
- PCDBGFINFOHLP pHlp)
+VMMR3_INT_DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *pszExcludePat, const char *pszSepFmt,
+ PCDBGFINFOHLP pHlp)
{
/*
* Validate input.
*/
+ PUVM pUVM = pVM->pUVM;
VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
AssertPtrReturn(pszIncludePat, VERR_INVALID_POINTER);
AssertPtrReturn(pszExcludePat, VERR_INVALID_POINTER);
@@ -887,10 +866,10 @@ VMMR3DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *p
* Enumerate the info handlers and call the ones matching.
* Note! We won't leave the critical section here...
*/
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
rc = VWRN_NOT_FOUND;
- for (PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
{
if ( RTStrSimplePatternMultiMatch(pszIncludePat, cchIncludePat, pInfo->szName, pInfo->cchName, NULL)
&& !RTStrSimplePatternMultiMatch(pszExcludePat, cchExcludePat, pInfo->szName, pInfo->cchName, NULL))
@@ -901,28 +880,31 @@ VMMR3DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *p
{
case DBGFINFOTYPE_DEV:
if (pInfo->fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, VMCPUID_ANY, (PFNRT)pInfo->u.Dev.pfnHandler, 3, pInfo->u.Dev.pDevIns, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)pInfo->u.Dev.pfnHandler, 3,
+ pInfo->u.Dev.pDevIns, pHlp, pszArgs);
else
pInfo->u.Dev.pfnHandler(pInfo->u.Dev.pDevIns, pHlp, pszArgs);
break;
case DBGFINFOTYPE_DRV:
if (pInfo->fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, VMCPUID_ANY, (PFNRT)pInfo->u.Drv.pfnHandler, 3, pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)pInfo->u.Drv.pfnHandler, 3,
+ pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
else
pInfo->u.Drv.pfnHandler(pInfo->u.Drv.pDrvIns, pHlp, pszArgs);
break;
case DBGFINFOTYPE_INT:
if (pInfo->fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, VMCPUID_ANY, (PFNRT)pInfo->u.Int.pfnHandler, 3, pVM, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)pInfo->u.Int.pfnHandler, 3, pVM, pHlp, pszArgs);
else
pInfo->u.Int.pfnHandler(pVM, pHlp, pszArgs);
break;
case DBGFINFOTYPE_EXT:
if (pInfo->fFlags & DBGFINFO_FLAGS_RUN_ON_EMT)
- rc = VMR3ReqCallVoidWait(pVM, VMCPUID_ANY, (PFNRT)pInfo->u.Ext.pfnHandler, 3, pInfo->u.Ext.pvUser, pHlp, pszArgs);
+ rc = VMR3ReqCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)pInfo->u.Ext.pfnHandler, 3,
+ pInfo->u.Ext.pvUser, pHlp, pszArgs);
else
pInfo->u.Ext.pfnHandler(pInfo->u.Ext.pvUser, pHlp, pszArgs);
break;
@@ -932,7 +914,7 @@ VMMR3DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *p
}
}
}
- int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ int rc2 = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc2);
return rc;
@@ -943,11 +925,11 @@ VMMR3DECL(int) DBGFR3InfoMulti(PVM pVM, const char *pszIncludePat, const char *p
* Enumerate all the register info handlers.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pfnCallback Pointer to callback function.
* @param pvUser User argument to pass to the callback.
*/
-VMMR3DECL(int) DBGFR3InfoEnum(PVM pVM, PFNDBGFINFOENUM pfnCallback, void *pvUser)
+VMMR3DECL(int) DBGFR3InfoEnum(PUVM pUVM, PFNDBGFINFOENUM pfnCallback, void *pvUser)
{
LogFlow(("DBGFR3InfoLog: pfnCallback=%p pvUser=%p\n", pfnCallback, pvUser));
@@ -959,21 +941,22 @@ VMMR3DECL(int) DBGFR3InfoEnum(PVM pVM, PFNDBGFINFOENUM pfnCallback, void *pvUser
AssertMsgFailed(("!pfnCallback\n"));
return VERR_INVALID_PARAMETER;
}
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
/*
* Enter and enumerate.
*/
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
rc = VINF_SUCCESS;
- for (PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst; RT_SUCCESS(rc) && pInfo; pInfo = pInfo->pNext)
- rc = pfnCallback(pVM, pInfo->szName, pInfo->pszDesc, pvUser);
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; RT_SUCCESS(rc) && pInfo; pInfo = pInfo->pNext)
+ rc = pfnCallback(pUVM, pInfo->szName, pInfo->pszDesc, pvUser);
/*
* Leave and exit.
*/
- int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ int rc2 = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc2);
LogFlow(("DBGFR3InfoLog: returns %Rrc\n", rc));
@@ -995,12 +978,13 @@ static DECLCALLBACK(void) dbgfR3InfoHelp(PVM pVM, PCDBGFINFOHLP pHlp, const char
/*
* Enter and enumerate.
*/
- int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
+ PUVM pUVM = pVM->pUVM;
+ int rc = RTCritSectEnter(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
if (pszArgs && *pszArgs)
{
- for (PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
{
const char *psz = strstr(pszArgs, pInfo->szName);
if ( psz
@@ -1014,7 +998,7 @@ static DECLCALLBACK(void) dbgfR3InfoHelp(PVM pVM, PCDBGFINFOHLP pHlp, const char
}
else
{
- for (PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
+ for (PDBGFINFO pInfo = pUVM->dbgf.s.pInfoFirst; pInfo; pInfo = pInfo->pNext)
pHlp->pfnPrintf(pHlp, "%-16s %s\n",
pInfo->szName, pInfo->pszDesc);
}
@@ -1022,7 +1006,7 @@ static DECLCALLBACK(void) dbgfR3InfoHelp(PVM pVM, PCDBGFINFOHLP pHlp, const char
/*
* Leave and exit.
*/
- rc = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
+ rc = RTCritSectLeave(&pUVM->dbgf.s.InfoCritSect);
AssertRC(rc);
}
diff --git a/src/VBox/VMM/VMMR3/DBGFLog.cpp b/src/VBox/VMM/VMMR3/DBGFLog.cpp
index e5b4047b..e55cd54f 100644
--- a/src/VBox/VMM/VMMR3/DBGFLog.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFLog.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,20 +22,15 @@
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/vm.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
+#include <iprt/param.h>
#include <iprt/string.h>
-/*******************************************************************************
-* Internal Functions *
-*******************************************************************************/
-static DECLCALLBACK(int) dbgfR3LogModifyGroups(PVM pVM, const char *pszGroupSettings);
-static DECLCALLBACK(int) dbgfR3LogModifyFlags(PVM pVM, const char *pszFlagSettings);
-static DECLCALLBACK(int) dbgfR3LogModifyDestinations(PVM pVM, const char *pszDestSettings);
-
-
/**
* Checkes for logger prefixes and selects the right logger.
*
@@ -46,14 +41,14 @@ static PRTLOGGER dbgfR3LogResolvedLogger(const char **ppsz)
{
PRTLOGGER pLogger;
const char *psz = *ppsz;
- if (!strncmp(psz, "release:", sizeof("release:") - 1))
+ if (!strncmp(psz, RT_STR_TUPLE("release:")))
{
*ppsz += sizeof("release:") - 1;
pLogger = RTLogRelDefaultInstance();
}
else
{
- if (!strncmp(psz, "debug:", sizeof("debug:") - 1))
+ if (!strncmp(psz, RT_STR_TUPLE("debug:")))
*ppsz += sizeof("debug:") - 1;
pLogger = RTLogDefaultInstance();
}
@@ -62,62 +57,45 @@ static PRTLOGGER dbgfR3LogResolvedLogger(const char **ppsz)
/**
- * Changes the logger group settings.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pszGroupSettings The group settings string. (VBOX_LOG)
- * By prefixing the string with \"release:\" the
- * changes will be applied to the release log
- * instead of the debug log. The prefix \"debug:\"
- * is also recognized.
- */
-VMMR3DECL(int) DBGFR3LogModifyGroups(PVM pVM, const char *pszGroupSettings)
-{
- AssertPtrReturn(pVM, VERR_INVALID_POINTER);
- AssertPtrReturn(pszGroupSettings, VERR_INVALID_POINTER);
-
- return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyGroups, 2, pVM, pszGroupSettings);
-}
-
-
-/**
* EMT worker for DBGFR3LogModifyGroups.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszGroupSettings The group settings string. (VBOX_LOG)
*/
-static DECLCALLBACK(int) dbgfR3LogModifyGroups(PVM pVM, const char *pszGroupSettings)
+static DECLCALLBACK(int) dbgfR3LogModifyGroups(PUVM pUVM, const char *pszGroupSettings)
{
PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszGroupSettings);
if (!pLogger)
return VINF_SUCCESS;
int rc = RTLogGroupSettings(pLogger, pszGroupSettings);
- if (RT_SUCCESS(rc))
- rc = VMMR3UpdateLoggers(pVM);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
return rc;
}
/**
- * Changes the logger flag settings.
+ * Changes the logger group settings.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pszFlagSettings The group settings string. (VBOX_LOG_FLAGS)
+ * @param pUVM The user mode VM handle.
+ * @param pszGroupSettings The group settings string. (VBOX_LOG)
* By prefixing the string with \"release:\" the
* changes will be applied to the release log
* instead of the debug log. The prefix \"debug:\"
* is also recognized.
*/
-VMMR3DECL(int) DBGFR3LogModifyFlags(PVM pVM, const char *pszFlagSettings)
+VMMR3DECL(int) DBGFR3LogModifyGroups(PUVM pUVM, const char *pszGroupSettings)
{
- AssertPtrReturn(pVM, VERR_INVALID_POINTER);
- AssertPtrReturn(pszFlagSettings, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszGroupSettings, VERR_INVALID_POINTER);
- return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyFlags, 2, pVM, pszFlagSettings);
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyGroups, 2, pUVM, pszGroupSettings);
}
@@ -125,39 +103,42 @@ VMMR3DECL(int) DBGFR3LogModifyFlags(PVM pVM, const char *pszFlagSettings)
* EMT worker for DBGFR3LogModifyFlags.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszFlagSettings The group settings string. (VBOX_LOG_FLAGS)
*/
-static DECLCALLBACK(int) dbgfR3LogModifyFlags(PVM pVM, const char *pszFlagSettings)
+static DECLCALLBACK(int) dbgfR3LogModifyFlags(PUVM pUVM, const char *pszFlagSettings)
{
PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszFlagSettings);
if (!pLogger)
return VINF_SUCCESS;
int rc = RTLogFlags(pLogger, pszFlagSettings);
- if (RT_SUCCESS(rc))
- rc = VMMR3UpdateLoggers(pVM);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
return rc;
}
/**
- * Changes the logger destination settings.
+ * Changes the logger flag settings.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pszDestSettings The destination settings string. (VBOX_LOG_DEST)
+ * @param pUVM The user mode VM handle.
+ * @param pszFlagSettings The group settings string. (VBOX_LOG_FLAGS)
* By prefixing the string with \"release:\" the
* changes will be applied to the release log
* instead of the debug log. The prefix \"debug:\"
* is also recognized.
*/
-VMMR3DECL(int) DBGFR3LogModifyDestinations(PVM pVM, const char *pszDestSettings)
+VMMR3DECL(int) DBGFR3LogModifyFlags(PUVM pUVM, const char *pszFlagSettings)
{
- AssertReturn(VALID_PTR(pVM), VERR_INVALID_POINTER);
- AssertReturn(VALID_PTR(pszDestSettings), VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszFlagSettings, VERR_INVALID_POINTER);
- return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyDestinations, 2, pVM, pszDestSettings);
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyFlags, 2, pUVM, pszFlagSettings);
}
@@ -165,18 +146,41 @@ VMMR3DECL(int) DBGFR3LogModifyDestinations(PVM pVM, const char *pszDestSettings)
* EMT worker for DBGFR3LogModifyFlags.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDestSettings The destination settings string. (VBOX_LOG_DEST)
*/
-static DECLCALLBACK(int) dbgfR3LogModifyDestinations(PVM pVM, const char *pszDestSettings)
+static DECLCALLBACK(int) dbgfR3LogModifyDestinations(PUVM pUVM, const char *pszDestSettings)
{
PRTLOGGER pLogger = dbgfR3LogResolvedLogger(&pszDestSettings);
if (!pLogger)
return VINF_SUCCESS;
int rc = RTLogDestinations(NULL, pszDestSettings);
- if (RT_SUCCESS(rc))
- rc = VMMR3UpdateLoggers(pVM);
+ if (RT_SUCCESS(rc) && pUVM->pVM)
+ {
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ rc = VMMR3UpdateLoggers(pUVM->pVM);
+ }
return rc;
}
+
+/**
+ * Changes the logger destination settings.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDestSettings The destination settings string. (VBOX_LOG_DEST)
+ * By prefixing the string with \"release:\" the
+ * changes will be applied to the release log
+ * instead of the debug log. The prefix \"debug:\"
+ * is also recognized.
+ */
+VMMR3DECL(int) DBGFR3LogModifyDestinations(PUVM pUVM, const char *pszDestSettings)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszDestSettings, VERR_INVALID_POINTER);
+
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3LogModifyDestinations, 2, pUVM, pszDestSettings);
+}
+
diff --git a/src/VBox/VMM/VMMR3/DBGFMem.cpp b/src/VBox/VMM/VMMR3/DBGFMem.cpp
index db93d733..c601302f 100644
--- a/src/VBox/VMM/VMMR3/DBGFMem.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFMem.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2007-2010 Oracle Corporation
+ * Copyright (C) 2007-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,9 +23,10 @@
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/vmm/mm.h>
@@ -36,7 +37,7 @@
* Scan guest memory for an exact byte string.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context to search in.
* @param pAddress Where to store the mixed address.
* @param puAlign The alignment restriction imposed on the search result.
@@ -46,16 +47,18 @@
* @param cbNeedle Size of the search byte string.
* @param pHitAddress Where to put the address of the first hit.
*/
-static DECLCALLBACK(int) dbgfR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange, RTGCUINTPTR *puAlign,
- const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
+static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
+ RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
Assert(idCpu == VMMGetCpuId(pVM));
/*
* Validate the input we use, PGM does the rest.
*/
RTGCUINTPTR cbRange = *pcbRange;
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_POINTER;
if (!VALID_PTR(pHitAddress))
return VERR_INVALID_POINTER;
@@ -79,7 +82,7 @@ static DECLCALLBACK(int) dbgfR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAd
RTGCPHYS PhysHit;
rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
if (RT_SUCCESS(rc))
- DBGFR3AddrFromPhys(pVM, pHitAddress, PhysHit);
+ DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
}
else
{
@@ -93,7 +96,7 @@ static DECLCALLBACK(int) dbgfR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAd
RTGCUINTPTR GCPtrHit;
rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
if (RT_SUCCESS(rc))
- DBGFR3AddrFromFlat(pVM, pHitAddress, GCPtrHit);
+ DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
}
return rc;
@@ -109,7 +112,7 @@ static DECLCALLBACK(int) dbgfR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAd
* @retval VERR_INVALID_POINTER if any of the pointer arguments are invalid.
* @retval VERR_INVALID_ARGUMENT if any other arguments are invalid.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the CPU context to search in.
* @param pAddress Where to store the mixed address.
* @param cbRange The number of bytes to scan.
@@ -121,13 +124,13 @@ static DECLCALLBACK(int) dbgfR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAd
*
* @thread Any thread.
*/
-VMMR3DECL(int) DBGFR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
+VMMR3DECL(int) DBGFR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
const void *pvNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
- pVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
+ pUVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);
}
@@ -136,19 +139,21 @@ VMMR3DECL(int) DBGFR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTG
* Read guest memory.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pAddress Where to start reading.
- * @param pvBuf Where to store the data we've read.
- * @param cbRead The number of bytes to read.
+ * @param pUVM The user mode VM handle.
+ * @param pAddress Where to start reading.
+ * @param pvBuf Where to store the data we've read.
+ * @param cbRead The number of bytes to read.
*/
-static DECLCALLBACK(int) dbgfR3MemRead(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
+static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
Assert(idCpu == VMMGetCpuId(pVM));
/*
* Validate the input we use, PGM does the rest.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_POINTER;
if (!VALID_PTR(pvBuf))
return VERR_INVALID_POINTER;
@@ -196,22 +201,24 @@ static DECLCALLBACK(int) dbgfR3MemRead(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAd
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param idCpu The ID of the source CPU context (for the address).
- * @param pAddress Where to start reading.
- * @param pvBuf Where to store the data we've read.
- * @param cbRead The number of bytes to read.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pvBuf Where to store the data we've read.
+ * @param cbRead The number of bytes to read.
*/
-VMMR3DECL(int) DBGFR3MemRead(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
+VMMR3DECL(int) DBGFR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+
if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
{
AssertCompile(sizeof(RTHCUINTPTR) <= sizeof(pAddress->FlatPtr));
- return VMMR3ReadR0Stack(pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ return VMMR3ReadR0Stack(pUVM->pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
}
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pVM, idCpu, pAddress, pvBuf, cbRead);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pUVM, idCpu, pAddress, pvBuf, cbRead);
}
@@ -220,18 +227,18 @@ VMMR3DECL(int) DBGFR3MemRead(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, voi
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param idCpu The ID of the source CPU context (for the address).
- * @param pAddress Where to start reading.
- * @param pszBuf Where to store the string.
- * @param cchBuf The size of the buffer.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pszBuf Where to store the string.
+ * @param cchBuf The size of the buffer.
*/
-static DECLCALLBACK(int) dbgfR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
+static DECLCALLBACK(int) dbgfR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
{
/*
* Validate the input we use, PGM does the rest.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_POINTER;
if (!VALID_PTR(pszBuf))
return VERR_INVALID_POINTER;
@@ -239,7 +246,7 @@ static DECLCALLBACK(int) dbgfR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRE
/*
* Let dbgfR3MemRead do the job.
*/
- int rc = dbgfR3MemRead(pVM, idCpu, pAddress, pszBuf, cchBuf);
+ int rc = dbgfR3MemRead(pUVM, idCpu, pAddress, pszBuf, cchBuf);
/*
* Make sure the result is terminated and that overflow is signaled.
@@ -268,13 +275,13 @@ static DECLCALLBACK(int) dbgfR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRE
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param idCpu The ID of the source CPU context (for the address).
- * @param pAddress Where to start reading.
- * @param pszBuf Where to store the string.
- * @param cchBuf The size of the buffer.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the source CPU context (for the address).
+ * @param pAddress Where to start reading.
+ * @param pszBuf Where to store the string.
+ * @param cchBuf The size of the buffer.
*/
-VMMR3DECL(int) DBGFR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
+VMMR3DECL(int) DBGFR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
{
/*
* Validate and zero output.
@@ -284,13 +291,13 @@ VMMR3DECL(int) DBGFR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddres
if (cchBuf <= 0)
return VERR_INVALID_PARAMETER;
memset(pszBuf, 0, cchBuf);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
/*
* Pass it on to the EMT.
*/
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pVM, idCpu, pAddress, pszBuf, cchBuf);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pUVM, idCpu, pAddress, pszBuf, cchBuf);
}
@@ -299,21 +306,23 @@ VMMR3DECL(int) DBGFR3MemReadString(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddres
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param idCpu The ID of the target CPU context (for the address).
- * @param pAddress Where to start writing.
- * @param pvBuf The data to write.
- * @param cbWrite The number of bytes to write.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the target CPU context (for the address).
+ * @param pAddress Where to start writing.
+ * @param pvBuf The data to write.
+ * @param cbWrite The number of bytes to write.
*/
-static DECLCALLBACK(int) dbgfR3MemWrite(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
+static DECLCALLBACK(int) dbgfR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
{
/*
* Validate the input we use, PGM does the rest.
*/
- if (!DBGFR3AddrIsValid(pVM, pAddress))
+ if (!DBGFR3AddrIsValid(pUVM, pAddress))
return VERR_INVALID_POINTER;
if (!VALID_PTR(pvBuf))
return VERR_INVALID_POINTER;
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
/*
* HMA is always special.
@@ -356,25 +365,28 @@ static DECLCALLBACK(int) dbgfR3MemWrite(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pA
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param idCpu The ID of the target CPU context (for the address).
- * @param pAddress Where to start writing.
- * @param pvBuf The data to write.
- * @param cbRead The number of bytes to write.
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the target CPU context (for the address).
+ * @param pAddress Where to start writing.
+ * @param pvBuf The data to write.
+ * @param cbRead The number of bytes to write.
*/
-VMMR3DECL(int) DBGFR3MemWrite(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
+VMMR3DECL(int) DBGFR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pVM, idCpu, pAddress, pvBuf, cbWrite);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pUVM, idCpu, pAddress, pvBuf, cbWrite);
}
/**
* Worker for DBGFR3SelQueryInfo that calls into SELM.
*/
-static DECLCALLBACK(int) dbgfR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
+static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
/*
* Make the query.
*/
@@ -420,7 +432,7 @@ static DECLCALLBACK(int) dbgfR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, u
}
else
{
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
rc = VERR_INVALID_STATE;
else
rc = SELMR3GetShadowSelectorInfo(pVM, Sel, pSelInfo);
@@ -445,7 +457,7 @@ static DECLCALLBACK(int) dbgfR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, u
* @retval VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
* pagetable or page backing the selector table wasn't present.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the virtual CPU context.
* @param Sel The selector to get info about.
* @param fFlags Flags, see DBGFQSEL_FLAGS_*.
@@ -455,10 +467,10 @@ static DECLCALLBACK(int) dbgfR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, u
* @remarks This is a wrapper around SELMR3GetSelectorInfo and
* SELMR3GetShadowSelectorInfo.
*/
-VMMR3DECL(int) DBGFR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
+VMMR3DECL(int) DBGFR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);
AssertReturn( (fFlags & (DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE))
!= (DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE), VERR_INVALID_PARAMETER);
@@ -469,7 +481,7 @@ VMMR3DECL(int) DBGFR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, uint32_t fF
/*
* Dispatch the request to a worker running on the target CPU.
*/
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pVM, idCpu, Sel, fFlags, pSelInfo);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
}
@@ -544,7 +556,7 @@ static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
* EMT worker for DBGFR3PagingDumpEx.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The shared VM handle.
* @param idCpu The current CPU ID.
* @param fFlags The flags, DBGFPGDMP_FLAGS_XXX. Valid.
* @param pcr3 The CR3 to use (unless we're getting the current
@@ -554,7 +566,7 @@ static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
* @param cMaxDepth The depth.
* @param pHlp The output callbacks.
*/
-static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
+static DECLCALLBACK(int) dbgfR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
@@ -563,13 +575,16 @@ static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFl
*/
if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
{
- int rc1 = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
+ int rc1 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
- int rc2 = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
+ int rc2 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
return RT_FAILURE(rc1) ? rc1 : rc2;
}
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
/*
* Get the current CR3/mode if required.
*/
@@ -625,7 +640,7 @@ static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFl
* This API can be used to dump both guest and shadow structures.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The current CPU ID.
* @param fFlags The flags, DBGFPGDMP_FLAGS_XXX.
* @param cr3 The CR3 to use (unless we're getting the current
@@ -636,14 +651,14 @@ static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFl
* @param pHlp The output callbacks. Defaults to the debug log if
* NULL.
*/
-VMMDECL(int) DBGFR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
+VMMDECL(int) DBGFR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
/*
* Input validation.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
AssertReturn(fFlags & (DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_GUEST), VERR_INVALID_PARAMETER);
AssertReturn((fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE) || !(fFlags & DBGFPGDMP_FLAGS_MODE_MASK), VERR_INVALID_PARAMETER);
@@ -656,7 +671,7 @@ VMMDECL(int) DBGFR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFlags, uint64_
/*
* Forward the request to the target CPU.
*/
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
- pVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
+ pUVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp);
}
diff --git a/src/VBox/VMM/VMMR3/DBGFModule.cpp b/src/VBox/VMM/VMMR3/DBGFModule.cpp
index d17cbf65..2813a619 100644
--- a/src/VBox/VMM/VMMR3/DBGFModule.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFModule.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2008 Oracle Corporation
+ * Copyright (C) 2008-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/DBGFOS.cpp b/src/VBox/VMM/VMMR3/DBGFOS.cpp
index 0cc90a6a..902432e4 100644
--- a/src/VBox/VMM/VMMR3/DBGFOS.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFOS.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2008 Oracle Corporation
+ * Copyright (C) 2008-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,7 +23,7 @@
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/mm.h>
#include "DBGFInternal.h"
-#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -35,38 +35,38 @@
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
-#define DBGF_OS_READ_LOCK(pVM) do { } while (0)
-#define DBGF_OS_READ_UNLOCK(pVM) do { } while (0)
+#define DBGF_OS_READ_LOCK(pUVM) do { } while (0)
+#define DBGF_OS_READ_UNLOCK(pUVM) do { } while (0)
-#define DBGF_OS_WRITE_LOCK(pVM) do { } while (0)
-#define DBGF_OS_WRITE_UNLOCK(pVM) do { } while (0)
+#define DBGF_OS_WRITE_LOCK(pUVM) do { } while (0)
+#define DBGF_OS_WRITE_UNLOCK(pUVM) do { } while (0)
/**
* Internal cleanup routine called by DBGFR3Term().
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-void dbgfR3OSTerm(PVM pVM)
+void dbgfR3OSTerm(PUVM pUVM)
{
/*
* Terminate the current one.
*/
- if (pVM->dbgf.s.pCurOS)
+ if (pUVM->dbgf.s.pCurOS)
{
- pVM->dbgf.s.pCurOS->pReg->pfnTerm(pVM, pVM->dbgf.s.pCurOS->abData);
- pVM->dbgf.s.pCurOS = NULL;
+ pUVM->dbgf.s.pCurOS->pReg->pfnTerm(pUVM, pUVM->dbgf.s.pCurOS->abData);
+ pUVM->dbgf.s.pCurOS = NULL;
}
/*
* Destroy all the instances.
*/
- while (pVM->dbgf.s.pOSHead)
+ while (pUVM->dbgf.s.pOSHead)
{
- PDBGFOS pOS = pVM->dbgf.s.pOSHead;
- pVM->dbgf.s.pOSHead = pOS->pNext;
+ PDBGFOS pOS = pUVM->dbgf.s.pOSHead;
+ pUVM->dbgf.s.pOSHead = pOS->pNext;
if (pOS->pReg->pfnDestruct)
- pOS->pReg->pfnDestruct(pVM, pOS->abData);
+ pOS->pReg->pfnDestruct(pUVM, pOS->abData);
MMR3HeapFree(pOS);
}
}
@@ -76,18 +76,18 @@ void dbgfR3OSTerm(PVM pVM)
* EMT worker function for DBGFR3OSRegister.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pReg The registration structure.
*/
-static DECLCALLBACK(int) dbgfR3OSRegister(PVM pVM, PDBGFOSREG pReg)
+static DECLCALLBACK(int) dbgfR3OSRegister(PUVM pUVM, PDBGFOSREG pReg)
{
/* more validations. */
- DBGF_OS_READ_LOCK(pVM);
+ DBGF_OS_READ_LOCK(pUVM);
PDBGFOS pOS;
- for (pOS = pVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
if (!strcmp(pOS->pReg->szName, pReg->szName))
{
- DBGF_OS_READ_UNLOCK(pVM);
+ DBGF_OS_READ_UNLOCK(pUVM);
Log(("dbgfR3OSRegister: %s -> VERR_ALREADY_LOADED\n", pReg->szName));
return VERR_ALREADY_LOADED;
}
@@ -95,22 +95,22 @@ static DECLCALLBACK(int) dbgfR3OSRegister(PVM pVM, PDBGFOSREG pReg)
/*
* Allocate a new structure, call the constructor and link it into the list.
*/
- pOS = (PDBGFOS)MMR3HeapAllocZ(pVM, MM_TAG_DBGF_OS, RT_OFFSETOF(DBGFOS, abData[pReg->cbData]));
+ pOS = (PDBGFOS)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_OS, RT_OFFSETOF(DBGFOS, abData[pReg->cbData]));
AssertReturn(pOS, VERR_NO_MEMORY);
pOS->pReg = pReg;
- int rc = pOS->pReg->pfnConstruct(pVM, pOS->abData);
+ int rc = pOS->pReg->pfnConstruct(pUVM, pOS->abData);
if (RT_SUCCESS(rc))
{
- DBGF_OS_WRITE_LOCK(pVM);
- pOS->pNext = pVM->dbgf.s.pOSHead;
- pVM->dbgf.s.pOSHead = pOS;
- DBGF_OS_WRITE_UNLOCK(pVM);
+ DBGF_OS_WRITE_LOCK(pUVM);
+ pOS->pNext = pUVM->dbgf.s.pOSHead;
+ pUVM->dbgf.s.pOSHead = pOS;
+ DBGF_OS_WRITE_UNLOCK(pUVM);
}
else
{
if (pOS->pReg->pfnDestruct)
- pOS->pReg->pfnDestruct(pVM, pOS->abData);
+ pOS->pReg->pfnDestruct(pUVM, pOS->abData);
MMR3HeapFree(pOS);
}
@@ -125,16 +125,16 @@ static DECLCALLBACK(int) dbgfR3OSRegister(PVM pVM, PDBGFOSREG pReg)
* to the list for us in the next call to DBGFR3OSDetect().
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pReg The registration structure.
* @thread Any.
*/
-VMMR3DECL(int) DBGFR3OSRegister(PVM pVM, PCDBGFOSREG pReg)
+VMMR3DECL(int) DBGFR3OSRegister(PUVM pUVM, PCDBGFOSREG pReg)
{
/*
* Validate intput.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pReg, VERR_INVALID_POINTER);
AssertReturn(pReg->u32Magic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
@@ -155,7 +155,7 @@ VMMR3DECL(int) DBGFR3OSRegister(PVM pVM, PCDBGFOSREG pReg)
/*
* Pass it on to EMT(0).
*/
- return VMR3ReqPriorityCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSRegister, 2, pVM, pReg);
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSRegister, 2, pUVM, pReg);
}
@@ -163,10 +163,10 @@ VMMR3DECL(int) DBGFR3OSRegister(PVM pVM, PCDBGFOSREG pReg)
* EMT worker function for DBGFR3OSDeregister.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pReg The registration structure.
*/
-static DECLCALLBACK(int) dbgfR3OSDeregister(PVM pVM, PDBGFOSREG pReg)
+static DECLCALLBACK(int) dbgfR3OSDeregister(PUVM pUVM, PDBGFOSREG pReg)
{
/*
* Unlink it.
@@ -174,22 +174,22 @@ static DECLCALLBACK(int) dbgfR3OSDeregister(PVM pVM, PDBGFOSREG pReg)
bool fWasCurOS = false;
PDBGFOS pOSPrev = NULL;
PDBGFOS pOS;
- DBGF_OS_WRITE_LOCK(pVM);
- for (pOS = pVM->dbgf.s.pOSHead; pOS; pOSPrev = pOS, pOS = pOS->pNext)
+ DBGF_OS_WRITE_LOCK(pUVM);
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOSPrev = pOS, pOS = pOS->pNext)
if (pOS->pReg == pReg)
{
if (pOSPrev)
pOSPrev->pNext = pOS->pNext;
else
- pVM->dbgf.s.pOSHead = pOS->pNext;
- if (pVM->dbgf.s.pCurOS == pOS)
+ pUVM->dbgf.s.pOSHead = pOS->pNext;
+ if (pUVM->dbgf.s.pCurOS == pOS)
{
- pVM->dbgf.s.pCurOS = NULL;
+ pUVM->dbgf.s.pCurOS = NULL;
fWasCurOS = true;
}
break;
}
- DBGF_OS_WRITE_UNLOCK(pVM);
+ DBGF_OS_WRITE_UNLOCK(pUVM);
if (!pOS)
{
Log(("DBGFR3OSDeregister: %s -> VERR_NOT_FOUND\n", pReg->szName));
@@ -201,9 +201,9 @@ static DECLCALLBACK(int) dbgfR3OSDeregister(PVM pVM, PDBGFOSREG pReg)
* destructor and clean up.
*/
if (fWasCurOS)
- pOS->pReg->pfnTerm(pVM, pOS->abData);
+ pOS->pReg->pfnTerm(pUVM, pOS->abData);
if (pOS->pReg->pfnDestruct)
- pOS->pReg->pfnDestruct(pVM, pOS->abData);
+ pOS->pReg->pfnDestruct(pUVM, pOS->abData);
MMR3HeapFree(pOS);
return VINF_SUCCESS;
@@ -215,27 +215,27 @@ static DECLCALLBACK(int) dbgfR3OSDeregister(PVM pVM, PDBGFOSREG pReg)
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pReg The registration structure.
* @thread Any.
*/
-VMMR3DECL(int) DBGFR3OSDeregister(PVM pVM, PCDBGFOSREG pReg)
+VMMR3DECL(int) DBGFR3OSDeregister(PUVM pUVM, PCDBGFOSREG pReg)
{
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pReg, VERR_INVALID_POINTER);
AssertReturn(pReg->u32Magic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
AssertReturn(pReg->u32EndMagic == DBGFOSREG_MAGIC, VERR_INVALID_MAGIC);
AssertReturn(RTStrEnd(&pReg->szName[0], sizeof(pReg->szName)), VERR_INVALID_NAME);
- DBGF_OS_READ_LOCK(pVM);
+ DBGF_OS_READ_LOCK(pUVM);
PDBGFOS pOS;
- for (pOS = pVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
+ for (pOS = pUVM->dbgf.s.pOSHead; pOS; pOS = pOS->pNext)
if (pOS->pReg == pReg)
break;
- DBGF_OS_READ_LOCK(pVM);
+ DBGF_OS_READ_UNLOCK(pUVM);
if (!pOS)
{
@@ -246,7 +246,7 @@ VMMR3DECL(int) DBGFR3OSDeregister(PVM pVM, PCDBGFOSREG pReg)
/*
* Pass it on to EMT(0).
*/
- return VMR3ReqPriorityCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDeregister, 2, pVM, pReg);
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDeregister, 2, pUVM, pReg);
}
@@ -257,30 +257,30 @@ VMMR3DECL(int) DBGFR3OSDeregister(PVM pVM, PCDBGFOSREG pReg)
* @retval VINF_SUCCESS if successfully detected.
* @retval VINF_DBGF_OS_NOT_DETCTED if we cannot figure it out.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName Where to store the OS name. Empty string if not detected.
* @param cchName Size of the buffer.
*/
-static DECLCALLBACK(int) dbgfR3OSDetect(PVM pVM, char *pszName, size_t cchName)
+static DECLCALLBACK(int) dbgfR3OSDetect(PUVM pUVM, char *pszName, size_t cchName)
{
/*
* Cycle thru the detection routines.
*/
- PDBGFOS const pOldOS = pVM->dbgf.s.pCurOS;
- pVM->dbgf.s.pCurOS = NULL;
+ PDBGFOS const pOldOS = pUVM->dbgf.s.pCurOS;
+ pUVM->dbgf.s.pCurOS = NULL;
- for (PDBGFOS pNewOS = pVM->dbgf.s.pOSHead; pNewOS; pNewOS = pNewOS->pNext)
- if (pNewOS->pReg->pfnProbe(pVM, pNewOS->abData))
+ for (PDBGFOS pNewOS = pUVM->dbgf.s.pOSHead; pNewOS; pNewOS = pNewOS->pNext)
+ if (pNewOS->pReg->pfnProbe(pUVM, pNewOS->abData))
{
int rc;
- pVM->dbgf.s.pCurOS = pNewOS;
+ pUVM->dbgf.s.pCurOS = pNewOS;
if (pOldOS == pNewOS)
- rc = pNewOS->pReg->pfnRefresh(pVM, pNewOS->abData);
+ rc = pNewOS->pReg->pfnRefresh(pUVM, pNewOS->abData);
else
{
if (pOldOS)
- pOldOS->pReg->pfnTerm(pVM, pNewOS->abData);
- rc = pNewOS->pReg->pfnInit(pVM, pNewOS->abData);
+ pOldOS->pReg->pfnTerm(pUVM, pOldOS->abData);
+ rc = pNewOS->pReg->pfnInit(pUVM, pNewOS->abData);
}
if (pszName && cchName)
strncat(pszName, pNewOS->pReg->szName, cchName);
@@ -289,7 +289,7 @@ static DECLCALLBACK(int) dbgfR3OSDetect(PVM pVM, char *pszName, size_t cchName)
/* not found */
if (pOldOS)
- pOldOS->pReg->pfnTerm(pVM, pOldOS->abData);
+ pOldOS->pReg->pfnTerm(pUVM, pOldOS->abData);
return VINF_DBGF_OS_NOT_DETCTED;
}
@@ -304,21 +304,22 @@ static DECLCALLBACK(int) dbgfR3OSDetect(PVM pVM, char *pszName, size_t cchName)
* @retval VINF_SUCCESS if successfully detected.
* @retval VINF_DBGF_OS_NOT_DETCTED if we cannot figure it out.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName Where to store the OS name. Empty string if not detected.
* @param cchName Size of the buffer.
* @thread Any.
*/
-VMMR3DECL(int) DBGFR3OSDetect(PVM pVM, char *pszName, size_t cchName)
+VMMR3DECL(int) DBGFR3OSDetect(PUVM pUVM, char *pszName, size_t cchName)
{
AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
if (pszName && cchName)
*pszName = '\0';
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
/*
* Pass it on to EMT(0).
*/
- return VMR3ReqPriorityCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDetect, 3, pVM, pszName, cchName);
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3OSDetect, 3, pUVM, pszName, cchName);
}
@@ -326,28 +327,28 @@ VMMR3DECL(int) DBGFR3OSDetect(PVM pVM, char *pszName, size_t cchName)
* EMT worker function for DBGFR3OSQueryNameAndVersion
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName Where to store the OS name. Optional.
* @param cchName The size of the name buffer.
* @param pszVersion Where to store the version string. Optional.
* @param cchVersion The size of the version buffer.
*/
-static DECLCALLBACK(int) dbgfR3OSQueryNameAndVersion(PVM pVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
+static DECLCALLBACK(int) dbgfR3OSQueryNameAndVersion(PUVM pUVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
{
/*
* Any known OS?
*/
- if (pVM->dbgf.s.pCurOS)
+ if (pUVM->dbgf.s.pCurOS)
{
int rc = VINF_SUCCESS;
if (pszName && cchName)
{
- size_t cch = strlen(pVM->dbgf.s.pCurOS->pReg->szName);
+ size_t cch = strlen(pUVM->dbgf.s.pCurOS->pReg->szName);
if (cchName > cch)
- memcpy(pszName, pVM->dbgf.s.pCurOS->pReg->szName, cch + 1);
+ memcpy(pszName, pUVM->dbgf.s.pCurOS->pReg->szName, cch + 1);
else
{
- memcpy(pszName, pVM->dbgf.s.pCurOS->pReg->szName, cchName - 1);
+ memcpy(pszName, pUVM->dbgf.s.pCurOS->pReg->szName, cchName - 1);
pszName[cchName - 1] = '\0';
rc = VINF_BUFFER_OVERFLOW;
}
@@ -355,7 +356,7 @@ static DECLCALLBACK(int) dbgfR3OSQueryNameAndVersion(PVM pVM, char *pszName, siz
if (pszVersion && cchVersion)
{
- int rc2 = pVM->dbgf.s.pCurOS->pReg->pfnQueryVersion(pVM, pVM->dbgf.s.pCurOS->abData, pszVersion, cchVersion);
+ int rc2 = pUVM->dbgf.s.pCurOS->pReg->pfnQueryVersion(pUVM, pUVM->dbgf.s.pCurOS->abData, pszVersion, cchVersion);
if (RT_FAILURE(rc2) || rc == VINF_SUCCESS)
rc = rc2;
}
@@ -373,15 +374,16 @@ static DECLCALLBACK(int) dbgfR3OSQueryNameAndVersion(PVM pVM, char *pszName, siz
* guest OS digger and not additions or user configuration.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName Where to store the OS name. Optional.
* @param cchName The size of the name buffer.
* @param pszVersion Where to store the version string. Optional.
* @param cchVersion The size of the version buffer.
* @thread Any.
*/
-VMMR3DECL(int) DBGFR3OSQueryNameAndVersion(PVM pVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
+VMMR3DECL(int) DBGFR3OSQueryNameAndVersion(PUVM pUVM, char *pszName, size_t cchName, char *pszVersion, size_t cchVersion)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrNullReturn(pszName, VERR_INVALID_POINTER);
AssertPtrNullReturn(pszVersion, VERR_INVALID_POINTER);
@@ -396,23 +398,23 @@ VMMR3DECL(int) DBGFR3OSQueryNameAndVersion(PVM pVM, char *pszName, size_t cchNam
/*
* Pass it on to EMT(0).
*/
- return VMR3ReqPriorityCallWait(pVM, 0 /*idDstCpu*/,
- (PFNRT)dbgfR3OSQueryNameAndVersion, 5, pVM, pszName, cchName, pszVersion, cchVersion);
+ return VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/,
+ (PFNRT)dbgfR3OSQueryNameAndVersion, 5, pUVM, pszName, cchName, pszVersion, cchVersion);
}
/**
* EMT worker for DBGFR3OSQueryInterface.
*
- * @param pVM Pointer to the VM.
- * @param enmIf The interface identifier.
- * @param ppvIf Where to store the interface pointer on success.
+ * @param pUVM The user mode VM handle.
+ * @param enmIf The interface identifier.
+ * @param ppvIf Where to store the interface pointer on success.
*/
-static DECLCALLBACK(void) dbgfR3OSQueryInterface(PVM pVM, DBGFOSINTERFACE enmIf, void **ppvIf)
+static DECLCALLBACK(void) dbgfR3OSQueryInterface(PUVM pUVM, DBGFOSINTERFACE enmIf, void **ppvIf)
{
- if (pVM->dbgf.s.pCurOS)
+ if (pUVM->dbgf.s.pCurOS)
{
- *ppvIf = pVM->dbgf.s.pCurOS->pReg->pfnQueryInterface(pVM, pVM->dbgf.s.pCurOS->abData, enmIf);
+ *ppvIf = pUVM->dbgf.s.pCurOS->pReg->pfnQueryInterface(pUVM, pUVM->dbgf.s.pCurOS->abData, enmIf);
if (*ppvIf)
{
/** @todo Create EMT wrapper for the returned interface once we've defined one...
@@ -429,11 +431,11 @@ static DECLCALLBACK(void) dbgfR3OSQueryInterface(PVM pVM, DBGFOSINTERFACE enmIf,
*
* @returns Pointer to the digger interface on success, NULL if the interfaces isn't
* available or no active guest OS digger.
- * @param pVM Pointer to the VM.
- * @param enmIf The interface identifier.
+ * @param pUVM The user mode VM handle.
+ * @param enmIf The interface identifier.
* @thread Any.
*/
-VMMR3DECL(void *) DBGFR3OSQueryInterface(PVM pVM, DBGFOSINTERFACE enmIf)
+VMMR3DECL(void *) DBGFR3OSQueryInterface(PUVM pUVM, DBGFOSINTERFACE enmIf)
{
AssertMsgReturn(enmIf > DBGFOSINTERFACE_INVALID && enmIf < DBGFOSINTERFACE_END, ("%d\n", enmIf), NULL);
@@ -441,7 +443,7 @@ VMMR3DECL(void *) DBGFR3OSQueryInterface(PVM pVM, DBGFOSINTERFACE enmIf)
* Pass it on to an EMT.
*/
void *pvIf = NULL;
- VMR3ReqPriorityCallVoidWait(pVM, VMCPUID_ANY, (PFNRT)dbgfR3OSQueryInterface, 3, pVM, enmIf, &pvIf);
+ VMR3ReqPriorityCallVoidWaitU(pUVM, VMCPUID_ANY, (PFNRT)dbgfR3OSQueryInterface, 3, pUVM, enmIf, &pvIf);
return pvIf;
}
diff --git a/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp b/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp
index 1c5ace18..e3c77c8e 100644
--- a/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFR3Trace.cpp
@@ -150,8 +150,8 @@ int dbgfR3TraceInit(PVM pVM)
*/
PCFGMNODE pDbgfNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF");
#if defined(DEBUG) || defined(RTTRACE_ENABLED)
- bool const fDefault = true;
- const char * const pszConfigDefault = "all";
+ bool const fDefault = false;
+ const char * const pszConfigDefault = "";
#else
bool const fDefault = false;
const char * const pszConfigDefault = "";
diff --git a/src/VBox/VMM/VMMR3/DBGFReg.cpp b/src/VBox/VMM/VMMR3/DBGFReg.cpp
index a5990dda..01228264 100644
--- a/src/VBox/VMM/VMMR3/DBGFReg.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFReg.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2010-2011 Oracle Corporation
+ * Copyright (C) 2010-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -24,6 +24,7 @@
#include "DBGFInternal.h"
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -36,30 +37,30 @@
* Defined Constants And Macros *
*******************************************************************************/
/** Locks the register database for writing. */
-#define DBGF_REG_DB_LOCK_WRITE(pVM) \
+#define DBGF_REG_DB_LOCK_WRITE(pUVM) \
do { \
- int rcSem = RTSemRWRequestWrite((pVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
+ int rcSem = RTSemRWRequestWrite((pUVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
AssertRC(rcSem); \
} while (0)
/** Unlocks the register database after writing. */
-#define DBGF_REG_DB_UNLOCK_WRITE(pVM) \
+#define DBGF_REG_DB_UNLOCK_WRITE(pUVM) \
do { \
- int rcSem = RTSemRWReleaseWrite((pVM)->dbgf.s.hRegDbLock); \
+ int rcSem = RTSemRWReleaseWrite((pUVM)->dbgf.s.hRegDbLock); \
AssertRC(rcSem); \
} while (0)
/** Locks the register database for reading. */
-#define DBGF_REG_DB_LOCK_READ(pVM) \
+#define DBGF_REG_DB_LOCK_READ(pUVM) \
do { \
- int rcSem = RTSemRWRequestRead((pVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
+ int rcSem = RTSemRWRequestRead((pUVM)->dbgf.s.hRegDbLock, RT_INDEFINITE_WAIT); \
AssertRC(rcSem); \
} while (0)
/** Unlocks the register database after reading. */
-#define DBGF_REG_DB_UNLOCK_READ(pVM) \
+#define DBGF_REG_DB_UNLOCK_READ(pUVM) \
do { \
- int rcSem = RTSemRWReleaseRead((pVM)->dbgf.s.hRegDbLock); \
+ int rcSem = RTSemRWReleaseRead((pUVM)->dbgf.s.hRegDbLock); \
AssertRC(rcSem); \
} while (0)
@@ -160,7 +161,8 @@ typedef struct DBGFR3REGNMQUERYALLARGS
PDBGFREGENTRYNM paRegs;
/** The number of entries in the output array. */
size_t cRegs;
- /** The current register number when enumerating the string space. */
+ /** The current register number when enumerating the string space.
+ * @remarks Only used by EMT(0). */
size_t iReg;
} DBGFR3REGNMQUERYALLARGS;
/** Pointer to a dbgfR3RegNmQueryAllWorker argument packet. */
@@ -173,8 +175,8 @@ typedef DBGFR3REGNMQUERYALLARGS *PDBGFR3REGNMQUERYALLARGS;
*/
typedef struct DBGFR3REGPRINTFARGS
{
- /** Pointer to the VM. */
- PVM pVM;
+ /** The user mode VM handle. */
+ PUVM pUVM;
/** The target CPU. */
VMCPUID idCpu;
/** Set if we're looking at guest registers. */
@@ -203,15 +205,15 @@ typedef DBGFR3REGPRINTFARGS *PDBGFR3REGPRINTFARGS;
* Initializes the register database.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-int dbgfR3RegInit(PVM pVM)
+int dbgfR3RegInit(PUVM pUVM)
{
- int rc = VINF_SUCCESS;
- if (!pVM->dbgf.s.fRegDbInitialized)
+ int rc = VINF_SUCCESS;
+ if (!pUVM->dbgf.s.fRegDbInitialized)
{
- rc = RTSemRWCreate(&pVM->dbgf.s.hRegDbLock);
- pVM->dbgf.s.fRegDbInitialized = RT_SUCCESS(rc);
+ rc = RTSemRWCreate(&pUVM->dbgf.s.hRegDbLock);
+ pUVM->dbgf.s.fRegDbInitialized = RT_SUCCESS(rc);
}
return rc;
}
@@ -220,13 +222,13 @@ int dbgfR3RegInit(PVM pVM)
/**
* Terminates the register database.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-void dbgfR3RegTerm(PVM pVM)
+void dbgfR3RegTerm(PUVM pUVM)
{
- RTSemRWDestroy(pVM->dbgf.s.hRegDbLock);
- pVM->dbgf.s.hRegDbLock = NIL_RTSEMRW;
- pVM->dbgf.s.fRegDbInitialized = false;
+ RTSemRWDestroy(pUVM->dbgf.s.hRegDbLock);
+ pUVM->dbgf.s.hRegDbLock = NIL_RTSEMRW;
+ pUVM->dbgf.s.fRegDbInitialized = false;
}
@@ -261,7 +263,7 @@ static bool dbgfR3RegIsNameValid(const char *pszName, char chDot)
* Common worker for registering a register set.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param paRegisters The register descriptors.
* @param enmType The set type.
* @param pvUserArg The user argument for the callbacks.
@@ -269,7 +271,7 @@ static bool dbgfR3RegIsNameValid(const char *pszName, char chDot)
* @param iInstance The instance number to be appended to @a
* pszPrefix when creating the set name.
*/
-static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSETTYPE enmType, void *pvUserArg,
+static int dbgfR3RegRegisterCommon(PUVM pUVM, PCDBGFREGDESC paRegisters, DBGFREGSETTYPE enmType, void *pvUserArg,
const char *pszPrefix, uint32_t iInstance)
{
/*
@@ -303,7 +305,8 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
("%#x (#%u)\n", paRegisters[iDesc].fFlags, iDesc),
VERR_INVALID_PARAMETER);
AssertPtrReturn(paRegisters[iDesc].pfnGet, VERR_INVALID_PARAMETER);
- AssertPtrReturn(paRegisters[iDesc].pfnSet, VERR_INVALID_PARAMETER);
+ AssertReturn(RT_VALID_PTR(paRegisters[iDesc].pfnSet) || (paRegisters[iDesc].fFlags & DBGFREG_FLAGS_READ_ONLY),
+ VERR_INVALID_PARAMETER);
uint32_t iAlias = 0;
PCDBGFREGALIAS paAliases = paRegisters[iDesc].paAliases;
@@ -337,7 +340,7 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
}
/* Check the instance number of the CPUs. */
- AssertReturn(enmType != DBGFREGSETTYPE_CPU || iInstance < pVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(enmType != DBGFREGSETTYPE_CPU || iInstance < pUVM->cCpus, VERR_INVALID_CPU_ID);
/*
* Allocate a new record and all associated lookup records.
@@ -347,7 +350,7 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
size_t const offLookupRecArray = cbRegSet;
cbRegSet += cLookupRecs * sizeof(DBGFREGLOOKUP);
- PDBGFREGSET pRegSet = (PDBGFREGSET)MMR3HeapAllocZ(pVM, MM_TAG_DBGF_REG, cbRegSet);
+ PDBGFREGSET pRegSet = (PDBGFREGSET)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_REG, cbRegSet);
if (!pRegSet)
return VERR_NO_MEMORY;
@@ -381,7 +384,7 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
for (iDesc = 0; paRegisters[iDesc].pszName != NULL && RT_SUCCESS(rc); iDesc++)
{
strcpy(pszReg, paRegisters[iDesc].pszName);
- pLookupRec->Core.pszString = MMR3HeapStrDup(pVM, MM_TAG_DBGF_REG, szName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
if (!pLookupRec->Core.pszString)
rc = VERR_NO_STR_MEMORY;
pLookupRec->pSet = pRegSet;
@@ -410,7 +413,7 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
for (uint32_t iSubField = 0; paSubFields[iSubField].pszName && RT_SUCCESS(rc); iSubField++)
{
strcpy(pszSub, paSubFields[iSubField].pszName);
- pLookupRec->Core.pszString = MMR3HeapStrDup(pVM, MM_TAG_DBGF_REG, szName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
if (!pLookupRec->Core.pszString)
rc = VERR_NO_STR_MEMORY;
pLookupRec->pSet = pRegSet;
@@ -431,7 +434,7 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
/* The alias record. */
strcpy(pszReg, pszRegName);
- pLookupRec->Core.pszString = MMR3HeapStrDup(pVM, MM_TAG_DBGF_REG, szName);
+ pLookupRec->Core.pszString = MMR3HeapStrDupU(pUVM, MM_TAG_DBGF_REG, szName);
if (!pLookupRec->Core.pszString)
rc = VERR_NO_STR_MEMORY;
pLookupRec->pSet = pRegSet;
@@ -449,35 +452,35 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
* Insert the record into the register set string space and optionally into
* the CPU register set cache.
*/
- DBGF_REG_DB_LOCK_WRITE(pVM);
+ DBGF_REG_DB_LOCK_WRITE(pUVM);
- bool fInserted = RTStrSpaceInsert(&pVM->dbgf.s.RegSetSpace, &pRegSet->Core);
+ bool fInserted = RTStrSpaceInsert(&pUVM->dbgf.s.RegSetSpace, &pRegSet->Core);
if (fInserted)
{
- pVM->dbgf.s.cRegs += pRegSet->cDescs;
+ pUVM->dbgf.s.cRegs += pRegSet->cDescs;
if (enmType == DBGFREGSETTYPE_CPU)
{
if (pRegSet->cDescs > DBGFREG_ALL_COUNT)
- pVM->dbgf.s.cRegs -= pRegSet->cDescs - DBGFREG_ALL_COUNT;
+ pUVM->dbgf.s.cRegs -= pRegSet->cDescs - DBGFREG_ALL_COUNT;
if (!strcmp(pszPrefix, "cpu"))
- pVM->aCpus[iInstance].dbgf.s.pGuestRegSet = pRegSet;
+ pUVM->aCpus[iInstance].dbgf.s.pGuestRegSet = pRegSet;
else
- pVM->aCpus[iInstance].dbgf.s.pHyperRegSet = pRegSet;
+ pUVM->aCpus[iInstance].dbgf.s.pHyperRegSet = pRegSet;
}
PDBGFREGLOOKUP paLookupRecs = pRegSet->paLookupRecs;
uint32_t iLookupRec = pRegSet->cLookupRecs;
while (iLookupRec-- > 0)
{
- bool fInserted2 = RTStrSpaceInsert(&pVM->dbgf.s.RegSpace, &paLookupRecs[iLookupRec].Core);
+ bool fInserted2 = RTStrSpaceInsert(&pUVM->dbgf.s.RegSpace, &paLookupRecs[iLookupRec].Core);
AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString)); NOREF(fInserted2);
}
- DBGF_REG_DB_UNLOCK_WRITE(pVM);
+ DBGF_REG_DB_UNLOCK_WRITE(pUVM);
return VINF_SUCCESS;
}
- DBGF_REG_DB_UNLOCK_WRITE(pVM);
+ DBGF_REG_DB_UNLOCK_WRITE(pUVM);
rc = VERR_DUPLICATE;
}
@@ -504,14 +507,16 @@ static int dbgfR3RegRegisterCommon(PVM pVM, PCDBGFREGDESC paRegisters, DBGFREGSE
*/
VMMR3_INT_DECL(int) DBGFR3RegRegisterCpu(PVM pVM, PVMCPU pVCpu, PCDBGFREGDESC paRegisters, bool fGuestRegs)
{
- if (!pVM->dbgf.s.fRegDbInitialized)
+ PUVM pUVM = pVM->pUVM;
+ if (!pUVM->dbgf.s.fRegDbInitialized)
{
- int rc = dbgfR3RegInit(pVM);
+ int rc = dbgfR3RegInit(pUVM);
if (RT_FAILURE(rc))
return rc;
}
- return dbgfR3RegRegisterCommon(pVM, paRegisters, DBGFREGSETTYPE_CPU, pVCpu, fGuestRegs ? "cpu" : "hypercpu", pVCpu->idCpu);
+ return dbgfR3RegRegisterCommon(pUVM, paRegisters, DBGFREGSETTYPE_CPU, pVCpu,
+ fGuestRegs ? "cpu" : "hypercpu", pVCpu->idCpu);
}
@@ -519,19 +524,21 @@ VMMR3_INT_DECL(int) DBGFR3RegRegisterCpu(PVM pVM, PVMCPU pVCpu, PCDBGFREGDESC pa
* Registers a set of registers for a device.
*
* @returns VBox status code.
- * @param enmReg The register identifier.
- * @param enmType The register type. This is for sort out
- * aliases. Pass DBGFREGVALTYPE_INVALID to get
- * the standard name.
+ * @param pVM Pointer to the VM.
+ * @param paRegisters The register descriptors.
+ * @param pDevIns The device instance. This will be the callback user
+ * argument.
+ * @param pszPrefix The device name.
+ * @param iInstance The device instance.
*/
-VMMR3DECL(int) DBGFR3RegRegisterDevice(PVM pVM, PCDBGFREGDESC paRegisters, PPDMDEVINS pDevIns, const char *pszPrefix, uint32_t iInstance)
+VMMR3_INT_DECL(int) DBGFR3RegRegisterDevice(PVM pVM, PCDBGFREGDESC paRegisters, PPDMDEVINS pDevIns,
+ const char *pszPrefix, uint32_t iInstance)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(paRegisters, VERR_INVALID_POINTER);
AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
AssertPtrReturn(pszPrefix, VERR_INVALID_POINTER);
- return dbgfR3RegRegisterCommon(pVM, paRegisters, DBGFREGSETTYPE_DEVICE, pDevIns, pszPrefix, iInstance);
+ return dbgfR3RegRegisterCommon(pVM->pUVM, paRegisters, DBGFREGSETTYPE_DEVICE, pDevIns, pszPrefix, iInstance);
}
@@ -781,7 +788,7 @@ static int dbgfR3RegValCast(PDBGFREGVAL pValue, DBGFREGVALTYPE enmFromType, DBGF
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The virtual CPU ID.
* @param enmReg The register to query.
* @param enmType The desired return type.
@@ -789,18 +796,18 @@ static int dbgfR3RegValCast(PDBGFREGVAL pValue, DBGFREGVALTYPE enmFromType, DBGF
* hypervisor CPU registers if clear (false).
* @param pValue Where to return the register value.
*/
-static DECLCALLBACK(int) dbgfR3RegCpuQueryWorkerOnCpu(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType,
+static DECLCALLBACK(int) dbgfR3RegCpuQueryWorkerOnCpu(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType,
bool fGuestRegs, PDBGFREGVAL pValue)
{
int rc = VINF_SUCCESS;
- DBGF_REG_DB_LOCK_READ(pVM);
+ DBGF_REG_DB_LOCK_READ(pUVM);
/*
* Look up the register set of the specified CPU.
*/
PDBGFREGSET pSet = fGuestRegs
- ? pVM->aCpus[idCpu].dbgf.s.pGuestRegSet
- : pVM->aCpus[idCpu].dbgf.s.pHyperRegSet;
+ ? pUVM->aCpus[idCpu].dbgf.s.pGuestRegSet
+ : pUVM->aCpus[idCpu].dbgf.s.pHyperRegSet;
if (RT_LIKELY(pSet))
{
/*
@@ -830,7 +837,7 @@ static DECLCALLBACK(int) dbgfR3RegCpuQueryWorkerOnCpu(PVM pVM, VMCPUID idCpu, DB
else
rc = VERR_INVALID_CPU_ID;
- DBGF_REG_DB_UNLOCK_READ(pVM);
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
return rc;
}
@@ -847,24 +854,25 @@ static DECLCALLBACK(int) dbgfR3RegCpuQueryWorkerOnCpu(PVM pVM, VMCPUID idCpu, DB
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The virtual CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param enmReg The register to query.
* @param enmType The desired return type.
* @param pValue Where to return the register value.
*/
-static int dbgfR3RegCpuQueryWorker(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType, PDBGFREGVAL pValue)
+static int dbgfR3RegCpuQueryWorker(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, DBGFREGVALTYPE enmType, PDBGFREGVAL pValue)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
AssertMsgReturn(enmReg >= DBGFREG_AL && enmReg <= DBGFREG_END, ("%d\n", enmReg), VERR_INVALID_PARAMETER);
bool const fGuestRegs = !(idCpu & DBGFREG_HYPER_VMCPUID);
idCpu &= ~DBGFREG_HYPER_VMCPUID;
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3RegCpuQueryWorkerOnCpu, 6,
- pVM, idCpu, enmReg, enmType, fGuestRegs, pValue);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryWorkerOnCpu, 6,
+ pUVM, idCpu, enmReg, enmType, fGuestRegs, pValue);
}
@@ -878,16 +886,16 @@ static int dbgfR3RegCpuQueryWorker(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, DBGFR
* @retval VERR_DBGF_UNSUPPORTED_CAST
* @retval VINF_DBGF_TRUNCATED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param enmReg The register that's being queried.
* @param pu8 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryU8(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint8_t *pu8)
+VMMR3DECL(int) DBGFR3RegCpuQueryU8(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint8_t *pu8)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegCpuQueryWorker(pVM, idCpu, enmReg, DBGFREGVALTYPE_U8, &Value);
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U8, &Value);
if (RT_SUCCESS(rc))
*pu8 = Value.u8;
else
@@ -907,16 +915,16 @@ VMMR3DECL(int) DBGFR3RegCpuQueryU8(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint8
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param enmReg The register that's being queried.
* @param pu16 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryU16(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint16_t *pu16)
+VMMR3DECL(int) DBGFR3RegCpuQueryU16(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint16_t *pu16)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegCpuQueryWorker(pVM, idCpu, enmReg, DBGFREGVALTYPE_U16, &Value);
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U16, &Value);
if (RT_SUCCESS(rc))
*pu16 = Value.u16;
else
@@ -936,16 +944,16 @@ VMMR3DECL(int) DBGFR3RegCpuQueryU16(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param enmReg The register that's being queried.
* @param pu32 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryU32(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint32_t *pu32)
+VMMR3DECL(int) DBGFR3RegCpuQueryU32(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint32_t *pu32)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegCpuQueryWorker(pVM, idCpu, enmReg, DBGFREGVALTYPE_U32, &Value);
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U32, &Value);
if (RT_SUCCESS(rc))
*pu32 = Value.u32;
else
@@ -965,16 +973,16 @@ VMMR3DECL(int) DBGFR3RegCpuQueryU32(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param enmReg The register that's being queried.
* @param pu64 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryU64(PVM pVM, VMCPUID idCpu, DBGFREG enmReg, uint64_t *pu64)
+VMMR3DECL(int) DBGFR3RegCpuQueryU64(PUVM pUVM, VMCPUID idCpu, DBGFREG enmReg, uint64_t *pu64)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegCpuQueryWorker(pVM, idCpu, enmReg, DBGFREGVALTYPE_U64, &Value);
+ int rc = dbgfR3RegCpuQueryWorker(pUVM, idCpu, enmReg, DBGFREGVALTYPE_U64, &Value);
if (RT_SUCCESS(rc))
*pu64 = Value.u64;
else
@@ -1007,10 +1015,10 @@ static void dbgfR3RegGetMsrBatch(PVMCPU pVCpu, PDBGFREGENTRY pReg, uint32_t idMs
}
-static DECLCALLBACK(int) dbgfR3RegCpuQueryBatchWorker(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+static DECLCALLBACK(int) dbgfR3RegCpuQueryBatchWorker(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
{
#if 0
- PVMCPU pVCpu = &pVM->aCpus[idCpu];
+ PVMCPU pVCpu = &pUVM->pVM->aCpus[idCpu];
PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
PDBGFREGENTRY pReg = paRegs - 1;
@@ -1070,7 +1078,7 @@ static DECLCALLBACK(int) dbgfR3RegCpuQueryBatchWorker(PVM pVM, VMCPUID idCpu, PD
* @retval VERR_INVALID_CPU_ID
* @retval VERR_DBGF_REGISTER_NOT_FOUND
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param paRegs Pointer to an array of @a cRegs elements. On
@@ -1080,10 +1088,11 @@ static DECLCALLBACK(int) dbgfR3RegCpuQueryBatchWorker(PVM pVM, VMCPUID idCpu, PD
* as a filler.
* @param cRegs The number of entries in @a paRegs.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
if (!cRegs)
return VINF_SUCCESS;
AssertReturn(cRegs < _1M, VERR_OUT_OF_RANGE);
@@ -1095,7 +1104,7 @@ VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRe
AssertMsgReturn(enmReg < DBGFREG_END && enmReg >= DBGFREG_AL, ("%d (%#x)", enmReg, enmReg), VERR_DBGF_REGISTER_NOT_FOUND);
}
- return VMR3ReqCallWait(pVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pVM, idCpu, paRegs, cRegs);
+ return VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pUVM, idCpu, paRegs, cRegs);
}
@@ -1106,7 +1115,7 @@ VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRe
* @retval VERR_INVALID_VM_HANDLE
* @retval VERR_INVALID_CPU_ID
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The target CPU ID. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
* @param paRegs Pointer to an array of @a cRegs elements.
@@ -1117,13 +1126,14 @@ VMMR3DECL(int) DBGFR3RegCpuQueryBatch(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRe
* @param cRegs The number of entries in @a paRegs. The
* recommended value is DBGFREG_ALL_COUNT.
*/
-VMMR3DECL(int) DBGFR3RegCpuQueryAll(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
+VMMR3DECL(int) DBGFR3RegCpuQueryAll(PUVM pUVM, VMCPUID idCpu, PDBGFREGENTRY paRegs, size_t cRegs)
{
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
if (!cRegs)
return VINF_SUCCESS;
AssertReturn(cRegs < _1M, VERR_OUT_OF_RANGE);
@@ -1141,7 +1151,7 @@ VMMR3DECL(int) DBGFR3RegCpuQueryAll(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRegs
while (iReg < cRegs)
paRegs[iReg++].enmReg = DBGFREG_END;
- return VMR3ReqCallWait(pVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pVM, idCpu, paRegs, cRegs);
+ return VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3RegCpuQueryBatchWorker, 4, pUVM, idCpu, paRegs, cRegs);
}
#endif /* rewrite or remove? */
@@ -1152,19 +1162,20 @@ VMMR3DECL(int) DBGFR3RegCpuQueryAll(PVM pVM, VMCPUID idCpu, PDBGFREGENTRY paRegs
* @returns Pointer to read-only register name (lower case). NULL if the
* parameters are invalid.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param enmReg The register identifier.
* @param enmType The register type. This is for sort out
* aliases. Pass DBGFREGVALTYPE_INVALID to get
* the standard name.
*/
-VMMR3DECL(const char *) DBGFR3RegCpuName(PVM pVM, DBGFREG enmReg, DBGFREGVALTYPE enmType)
+VMMR3DECL(const char *) DBGFR3RegCpuName(PUVM pUVM, DBGFREG enmReg, DBGFREGVALTYPE enmType)
{
AssertReturn(enmReg >= DBGFREG_AL && enmReg < DBGFREG_END, NULL);
AssertReturn(enmType >= DBGFREGVALTYPE_INVALID && enmType < DBGFREGVALTYPE_END, NULL);
- VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
- PCDBGFREGSET pSet = pVM->aCpus[0].dbgf.s.pGuestRegSet;
+ PCDBGFREGSET pSet = pUVM->aCpus[0].dbgf.s.pGuestRegSet;
if (RT_UNLIKELY(!pSet))
return NULL;
@@ -1220,18 +1231,19 @@ static ssize_t dbgfR3RegCopyToLower(const char *pszSrc, size_t cchSrc, char *psz
* Resolves the register name.
*
* @returns Lookup record.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default CPU ID set.
* @param pszReg The register name.
* @param fGuestRegs Default to guest CPU registers if set, the
* hypervisor CPU registers if clear.
*/
-static PCDBGFREGLOOKUP dbgfR3RegResolve(PVM pVM, VMCPUID idDefCpu, const char *pszReg, bool fGuestRegs)
+static PCDBGFREGLOOKUP dbgfR3RegResolve(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, bool fGuestRegs)
{
- DBGF_REG_DB_LOCK_READ(pVM);
+ DBGF_REG_DB_LOCK_READ(pUVM);
/* Try looking up the name without any case folding or cpu prefixing. */
- PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(&pVM->dbgf.s.RegSpace, pszReg);
+ PRTSTRSPACE pRegSpace = &pUVM->dbgf.s.RegSpace;
+ PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, pszReg);
if (!pLookupRec)
{
char szName[DBGF_REG_MAX_NAME * 4 + 16];
@@ -1239,7 +1251,7 @@ static PCDBGFREGLOOKUP dbgfR3RegResolve(PVM pVM, VMCPUID idDefCpu, const char *p
/* Lower case it and try again. */
ssize_t cchFolded = dbgfR3RegCopyToLower(pszReg, RTSTR_MAX, szName, sizeof(szName) - DBGF_REG_MAX_NAME);
if (cchFolded > 0)
- pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(&pVM->dbgf.s.RegSpace, szName);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
if ( !pLookupRec
&& cchFolded >= 0
&& idDefCpu != VMCPUID_ANY)
@@ -1247,11 +1259,11 @@ static PCDBGFREGLOOKUP dbgfR3RegResolve(PVM pVM, VMCPUID idDefCpu, const char *p
/* Prefix it with the specified CPU set. */
size_t cchCpuSet = RTStrPrintf(szName, sizeof(szName), fGuestRegs ? "cpu%u." : "hypercpu%u.", idDefCpu);
dbgfR3RegCopyToLower(pszReg, RTSTR_MAX, &szName[cchCpuSet], sizeof(szName) - cchCpuSet);
- pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(&pVM->dbgf.s.RegSpace, szName);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
}
}
- DBGF_REG_DB_UNLOCK_READ(pVM);
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
return pLookupRec;
}
@@ -1263,24 +1275,31 @@ static PCDBGFREGLOOKUP dbgfR3RegResolve(PVM pVM, VMCPUID idDefCpu, const char *p
* @retval VINF_SUCCESS if the register was found.
* @retval VERR_DBGF_REGISTER_NOT_FOUND if not found.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default CPU.
 * @param pszReg The register name.
*/
-VMMR3DECL(int) DBGFR3RegNmValidate(PVM pVM, VMCPUID idDefCpu, const char *pszReg)
+VMMR3DECL(int) DBGFR3RegNmValidate(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg)
{
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
/*
* Resolve the register.
*/
- bool const fGuestRegs = !(idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY;
- PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pVM, idDefCpu, pszReg, fGuestRegs);
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
if (!pLookupRec)
return VERR_DBGF_REGISTER_NOT_FOUND;
return VINF_SUCCESS;
@@ -1293,14 +1312,14 @@ VMMR3DECL(int) DBGFR3RegNmValidate(PVM pVM, VMCPUID idDefCpu, const char *pszReg
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pLookupRec The register lookup record.
* @param enmType The desired return type.
* @param pValue Where to return the register value.
* @param penmType Where to store the register value type.
* Optional.
*/
-static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PVM pVM, PCDBGFREGLOOKUP pLookupRec, DBGFREGVALTYPE enmType,
+static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PUVM pUVM, PCDBGFREGLOOKUP pLookupRec, DBGFREGVALTYPE enmType,
PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
{
PCDBGFREGDESC pDesc = pLookupRec->pDesc;
@@ -1309,7 +1328,7 @@ static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PVM pVM, PCDBGFREGLOOKUP pL
DBGFREGVALTYPE enmValueType = pDesc->enmType;
int rc;
- NOREF(pVM);
+ NOREF(pUVM);
/*
* Get the register or sub-field value.
@@ -1408,7 +1427,7 @@ static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PVM pVM, PCDBGFREGLOOKUP pL
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The virtual CPU ID for the default CPU register
* set. Can be OR'ed with DBGFREG_HYPER_VMCPUID.
* @param pszReg The register to query.
@@ -1417,14 +1436,15 @@ static DECLCALLBACK(int) dbgfR3RegNmQueryWorkerOnCpu(PVM pVM, PCDBGFREGLOOKUP pL
* @param penmType Where to store the register value type.
* Optional.
*/
-static int dbgfR3RegNmQueryWorker(PVM pVM, VMCPUID idDefCpu, const char *pszReg, DBGFREGVALTYPE enmType,
+static int dbgfR3RegNmQueryWorker(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, DBGFREGVALTYPE enmType,
PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
{
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
Assert(enmType > DBGFREGVALTYPE_INVALID && enmType <= DBGFREGVALTYPE_END);
@@ -1433,15 +1453,21 @@ static int dbgfR3RegNmQueryWorker(PVM pVM, VMCPUID idDefCpu, const char *pszReg,
/*
* Resolve the register and call the getter on the relevant CPU.
*/
- bool const fGuestRegs = !(idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY;
- PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pVM, idDefCpu, pszReg, fGuestRegs);
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
if (pLookupRec)
{
if (pLookupRec->pSet->enmType == DBGFREGSETTYPE_CPU)
idDefCpu = pLookupRec->pSet->uUserArg.pVCpu->idCpu;
else if (idDefCpu != VMCPUID_ANY)
idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
- return VMR3ReqPriorityCallWait(pVM, idDefCpu, (PFNRT)dbgfR3RegNmQueryWorkerOnCpu, 5, pVM, pLookupRec, enmType, pValue, penmType);
+ return VMR3ReqPriorityCallWaitU(pUVM, idDefCpu, (PFNRT)dbgfR3RegNmQueryWorkerOnCpu, 5,
+ pUVM, pLookupRec, enmType, pValue, penmType);
}
return VERR_DBGF_REGISTER_NOT_FOUND;
}
@@ -1455,7 +1481,7 @@ static int dbgfR3RegNmQueryWorker(PVM pVM, VMCPUID idDefCpu, const char *pszReg,
* @retval VERR_INVALID_CPU_ID
* @retval VERR_DBGF_REGISTER_NOT_FOUND
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1465,9 +1491,9 @@ static int dbgfR3RegNmQueryWorker(PVM pVM, VMCPUID idDefCpu, const char *pszReg,
* @param pValue Where to store the register value.
* @param penmType Where to store the register value type.
*/
-VMMR3DECL(int) DBGFR3RegNmQuery(PVM pVM, VMCPUID idDefCpu, const char *pszReg, PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
+VMMR3DECL(int) DBGFR3RegNmQuery(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PDBGFREGVAL pValue, PDBGFREGVALTYPE penmType)
{
- return dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_END, pValue, penmType);
+ return dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_END, pValue, penmType);
}
@@ -1481,7 +1507,7 @@ VMMR3DECL(int) DBGFR3RegNmQuery(PVM pVM, VMCPUID idDefCpu, const char *pszReg, P
* @retval VERR_DBGF_UNSUPPORTED_CAST
* @retval VINF_DBGF_TRUNCATED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1490,10 +1516,10 @@ VMMR3DECL(int) DBGFR3RegNmQuery(PVM pVM, VMCPUID idDefCpu, const char *pszReg, P
* "set.reg[.sub]".
* @param pu8 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryU8(PVM pVM, VMCPUID idDefCpu, const char *pszReg, uint8_t *pu8)
+VMMR3DECL(int) DBGFR3RegNmQueryU8(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint8_t *pu8)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_U8, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U8, &Value, NULL);
if (RT_SUCCESS(rc))
*pu8 = Value.u8;
else
@@ -1513,7 +1539,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryU8(PVM pVM, VMCPUID idDefCpu, const char *pszReg,
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1522,10 +1548,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryU8(PVM pVM, VMCPUID idDefCpu, const char *pszReg,
* "set.reg[.sub]".
* @param pu16 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryU16(PVM pVM, VMCPUID idDefCpu, const char *pszReg, uint16_t *pu16)
+VMMR3DECL(int) DBGFR3RegNmQueryU16(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint16_t *pu16)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_U16, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U16, &Value, NULL);
if (RT_SUCCESS(rc))
*pu16 = Value.u16;
else
@@ -1545,7 +1571,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryU16(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1554,10 +1580,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryU16(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* "set.reg[.sub]".
* @param pu32 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryU32(PVM pVM, VMCPUID idDefCpu, const char *pszReg, uint32_t *pu32)
+VMMR3DECL(int) DBGFR3RegNmQueryU32(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint32_t *pu32)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_U32, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U32, &Value, NULL);
if (RT_SUCCESS(rc))
*pu32 = Value.u32;
else
@@ -1577,7 +1603,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryU32(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1586,10 +1612,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryU32(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* "set.reg[.sub]".
* @param pu64 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryU64(PVM pVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64)
+VMMR3DECL(int) DBGFR3RegNmQueryU64(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_U64, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U64, &Value, NULL);
if (RT_SUCCESS(rc))
*pu64 = Value.u64;
else
@@ -1609,7 +1635,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryU64(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1618,10 +1644,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryU64(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* "set.reg[.sub]".
* @param pu128 Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryU128(PVM pVM, VMCPUID idDefCpu, const char *pszReg, PRTUINT128U pu128)
+VMMR3DECL(int) DBGFR3RegNmQueryU128(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PRTUINT128U pu128)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_U128, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_U128, &Value, NULL);
if (RT_SUCCESS(rc))
*pu128 = Value.u128;
else
@@ -1642,7 +1668,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryU128(PVM pVM, VMCPUID idDefCpu, const char *pszRe
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1651,10 +1677,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryU128(PVM pVM, VMCPUID idDefCpu, const char *pszRe
* "set.reg[.sub]".
* @param plrd Where to store the register value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryLrd(PVM pVM, VMCPUID idDefCpu, const char *pszReg, long double *plrd)
+VMMR3DECL(int) DBGFR3RegNmQueryLrd(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, long double *plrd)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_R80, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_R80, &Value, NULL);
if (RT_SUCCESS(rc))
*plrd = Value.lrd;
else
@@ -1675,7 +1701,7 @@ VMMR3DECL(int) DBGFR3RegNmQueryLrd(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* @retval VINF_DBGF_TRUNCATED_REGISTER
* @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDefCpu The default target CPU ID, VMCPUID_ANY if not
* applicable. Can be OR'ed with
* DBGFREG_HYPER_VMCPUID.
@@ -1685,10 +1711,10 @@ VMMR3DECL(int) DBGFR3RegNmQueryLrd(PVM pVM, VMCPUID idDefCpu, const char *pszReg
* @param pu64Base Where to store the register base value.
* @param pu32Limit Where to store the register limit value.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryXdtr(PVM pVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64Base, uint32_t *pu32Limit)
+VMMR3DECL(int) DBGFR3RegNmQueryXdtr(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, uint64_t *pu64Base, uint32_t *pu32Limit)
{
DBGFREGVAL Value;
- int rc = dbgfR3RegNmQueryWorker(pVM, idDefCpu, pszReg, DBGFREGVALTYPE_DTR, &Value, NULL);
+ int rc = dbgfR3RegNmQueryWorker(pUVM, idDefCpu, pszReg, DBGFREGVALTYPE_DTR, &Value, NULL);
if (RT_SUCCESS(rc))
{
*pu64Base = Value.dtr.u64Base;
@@ -1703,20 +1729,20 @@ VMMR3DECL(int) DBGFR3RegNmQueryXdtr(PVM pVM, VMCPUID idDefCpu, const char *pszRe
}
-/// @todo VMMR3DECL(int) DBGFR3RegNmQueryBatch(PVM pVM,VMCPUID idDefCpu, DBGFREGENTRYNM paRegs, size_t cRegs);
+/// @todo VMMR3DECL(int) DBGFR3RegNmQueryBatch(PUVM pUVM,VMCPUID idDefCpu, DBGFREGENTRYNM paRegs, size_t cRegs);
/**
* Gets the number of registers returned by DBGFR3RegNmQueryAll.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pcRegs Where to return the register count.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryAllCount(PVM pVM, size_t *pcRegs)
+VMMR3DECL(int) DBGFR3RegNmQueryAllCount(PUVM pUVM, size_t *pcRegs)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- *pcRegs = pVM->dbgf.s.cRegs;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ *pcRegs = pUVM->dbgf.s.cRegs;
return VINF_SUCCESS;
}
@@ -1802,17 +1828,19 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCp
PDBGFR3REGNMQUERYALLARGS pArgs = (PDBGFR3REGNMQUERYALLARGS)pvUser;
PDBGFREGENTRYNM paRegs = pArgs->paRegs;
size_t const cRegs = pArgs->cRegs;
+ PUVM pUVM = pVM->pUVM;
+ PUVMCPU pUVCpu = pVCpu->pUVCpu;
- DBGF_REG_DB_LOCK_READ(pVM);
+ DBGF_REG_DB_LOCK_READ(pUVM);
/*
* My guest CPU registers.
*/
size_t iCpuReg = pVCpu->idCpu * DBGFREG_ALL_COUNT;
- if (pVCpu->dbgf.s.pGuestRegSet)
+ if (pUVCpu->dbgf.s.pGuestRegSet)
{
if (iCpuReg < cRegs)
- dbgfR3RegNmQueryAllInSet(pVCpu->dbgf.s.pGuestRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
+ dbgfR3RegNmQueryAllInSet(pUVCpu->dbgf.s.pGuestRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
}
else
dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, iCpuReg, DBGFREG_ALL_COUNT);
@@ -1820,11 +1848,11 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCp
/*
* My hypervisor CPU registers.
*/
- iCpuReg = pVM->cCpus * DBGFREG_ALL_COUNT + pVCpu->idCpu * DBGFREG_ALL_COUNT;
- if (pVCpu->dbgf.s.pHyperRegSet)
+ iCpuReg = pUVM->cCpus * DBGFREG_ALL_COUNT + pUVCpu->idCpu * DBGFREG_ALL_COUNT;
+ if (pUVCpu->dbgf.s.pHyperRegSet)
{
if (iCpuReg < cRegs)
- dbgfR3RegNmQueryAllInSet(pVCpu->dbgf.s.pHyperRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
+ dbgfR3RegNmQueryAllInSet(pUVCpu->dbgf.s.pHyperRegSet, DBGFREG_ALL_COUNT, &paRegs[iCpuReg], cRegs - iCpuReg);
}
else
dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, iCpuReg, DBGFREG_ALL_COUNT);
@@ -1832,14 +1860,14 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCp
/*
* The primary CPU does all the other registers.
*/
- if (pVCpu->idCpu == 0)
+ if (pUVCpu->idCpu == 0)
{
- pArgs->iReg = pVM->cCpus * DBGFREG_ALL_COUNT * 2;
- RTStrSpaceEnumerate(&pVM->dbgf.s.RegSetSpace, dbgfR3RegNmQueryAllEnum, pArgs);
+ pArgs->iReg = pUVM->cCpus * DBGFREG_ALL_COUNT * 2;
+ RTStrSpaceEnumerate(&pUVM->dbgf.s.RegSetSpace, dbgfR3RegNmQueryAllEnum, pArgs);
dbgfR3RegNmQueryAllPadEntries(paRegs, cRegs, pArgs->iReg, cRegs);
}
- DBGF_REG_DB_UNLOCK_READ(pVM);
+ DBGF_REG_DB_UNLOCK_READ(pUVM);
return VINF_SUCCESS; /* Ignore errors. */
}
@@ -1848,7 +1876,7 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCp
* Queries all register.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param paRegs The output register value array. The register
* name string is read only and shall not be freed
* or modified.
@@ -1856,8 +1884,10 @@ static DECLCALLBACK(VBOXSTRICTRC) dbgfR3RegNmQueryAllWorker(PVM pVM, PVMCPU pVCp
* correct size can be obtained by calling
* DBGFR3RegNmQueryAllCount.
*/
-VMMR3DECL(int) DBGFR3RegNmQueryAll(PVM pVM, PDBGFREGENTRYNM paRegs, size_t cRegs)
+VMMR3DECL(int) DBGFR3RegNmQueryAll(PUVM pUVM, PDBGFREGENTRYNM paRegs, size_t cRegs)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(paRegs, VERR_INVALID_POINTER);
AssertReturn(cRegs > 0, VERR_OUT_OF_RANGE);
@@ -1870,10 +1900,224 @@ VMMR3DECL(int) DBGFR3RegNmQueryAll(PVM pVM, PDBGFREGENTRYNM paRegs, size_t cRegs
}
-VMMR3DECL(int) DBGFR3RegNmSet(PVM pVM, VMCPUID idDefCpu, const char *pszReg, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType)
+/**
+ * On CPU worker for the register modifications, used by DBGFR3RegNmSet.
+ *
+ * @returns VBox status code.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param pLookupRec The register lookup record. Maybe be modified,
+ * so please pass a copy of the user's one.
+ * @param pValue The new register value.
+ * @param enmType The register value type.
+ */
+static DECLCALLBACK(int) dbgfR3RegNmSetWorkerOnCpu(PUVM pUVM, PDBGFREGLOOKUP pLookupRec,
+ PCDBGFREGVAL pValue, PCDBGFREGVAL pMask)
{
- NOREF(pVM); NOREF(idDefCpu); NOREF(pszReg); NOREF(pValue); NOREF(enmType);
- return VERR_NOT_IMPLEMENTED;
+ PCDBGFREGSUBFIELD pSubField = pLookupRec->pSubField;
+ if (pSubField && pSubField->pfnSet)
+ return pSubField->pfnSet(pLookupRec->pSet->uUserArg.pv, pSubField, pValue->u128, pMask->u128);
+ return pLookupRec->pDesc->pfnSet(pLookupRec->pSet->uUserArg.pv, pLookupRec->pDesc, pValue, pMask);
+}
+
+
+/**
+ * Worker for the register setting.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS
+ * @retval VERR_INVALID_VM_HANDLE
+ * @retval VERR_INVALID_CPU_ID
+ * @retval VERR_DBGF_REGISTER_NOT_FOUND
+ * @retval VERR_DBGF_UNSUPPORTED_CAST
+ * @retval VINF_DBGF_TRUNCATED_REGISTER
+ * @retval VINF_DBGF_ZERO_EXTENDED_REGISTER
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDefCpu The virtual CPU ID for the default CPU register
+ * set. Can be OR'ed with DBGFREG_HYPER_VMCPUID.
+ * @param pszReg The register to set.
+ * @param pValue The value to set
+ * @param enmType How to interpret the value in @a pValue.
+ */
+VMMR3DECL(int) DBGFR3RegNmSet(PUVM pUVM, VMCPUID idDefCpu, const char *pszReg, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idDefCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idDefCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ AssertPtrReturn(pszReg, VERR_INVALID_POINTER);
+ AssertReturn(enmType > DBGFREGVALTYPE_INVALID && enmType < DBGFREGVALTYPE_END, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pValue, VERR_INVALID_PARAMETER);
+
+ /*
+ * Resolve the register and check that it is writable.
+ */
+ bool fGuestRegs = true;
+ if ((idDefCpu & DBGFREG_HYPER_VMCPUID) && idDefCpu != VMCPUID_ANY)
+ {
+ fGuestRegs = false;
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+ }
+ PCDBGFREGLOOKUP pLookupRec = dbgfR3RegResolve(pUVM, idDefCpu, pszReg, fGuestRegs);
+ if (pLookupRec)
+ {
+ PCDBGFREGDESC pDesc = pLookupRec->pDesc;
+ PCDBGFREGSET pSet = pLookupRec->pSet;
+ PCDBGFREGSUBFIELD pSubField = pLookupRec->pSubField;
+
+ if ( !(pDesc->fFlags & DBGFREG_FLAGS_READ_ONLY)
+ && (pSubField
+ ? !(pSubField->fFlags & DBGFREGSUBFIELD_FLAGS_READ_ONLY)
+ && (pSubField->pfnSet != NULL || pDesc->pfnSet != NULL)
+ : pDesc->pfnSet != NULL) )
+ {
+ /*
+ * Calculate the modification mask and cast the input value to the
+ * type of the target register.
+ */
+ DBGFREGVAL Mask = DBGFREGVAL_INITIALIZE_ZERO;
+ DBGFREGVAL Value = DBGFREGVAL_INITIALIZE_ZERO;
+ switch (enmType)
+ {
+ case DBGFREGVALTYPE_U8:
+ Value.u8 = pValue->u8;
+ Mask.u8 = UINT8_MAX;
+ break;
+ case DBGFREGVALTYPE_U16:
+ Value.u16 = pValue->u16;
+ Mask.u16 = UINT16_MAX;
+ break;
+ case DBGFREGVALTYPE_U32:
+ Value.u32 = pValue->u32;
+ Mask.u32 = UINT32_MAX;
+ break;
+ case DBGFREGVALTYPE_U64:
+ Value.u64 = pValue->u64;
+ Mask.u64 = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_U128:
+ Value.u128 = pValue->u128;
+ Mask.u128.s.Lo = UINT64_MAX;
+ Mask.u128.s.Hi = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_R80:
+#ifdef RT_COMPILER_WITH_80BIT_LONG_DOUBLE
+ Value.r80Ex.lrd = pValue->r80Ex.lrd;
+#else
+ Value.r80Ex.au64[0] = pValue->r80Ex.au64[0];
+ Value.r80Ex.au16[4] = pValue->r80Ex.au16[4];
+#endif
+ Value.r80Ex.au64[0] = UINT64_MAX;
+ Value.r80Ex.au16[4] = UINT16_MAX;
+ break;
+ case DBGFREGVALTYPE_DTR:
+ Value.dtr.u32Limit = pValue->dtr.u32Limit;
+ Value.dtr.u64Base = pValue->dtr.u64Base;
+ Mask.dtr.u32Limit = UINT32_MAX;
+ Mask.dtr.u64Base = UINT64_MAX;
+ break;
+ case DBGFREGVALTYPE_32BIT_HACK:
+ case DBGFREGVALTYPE_END:
+ case DBGFREGVALTYPE_INVALID:
+ AssertFailedReturn(VERR_INTERNAL_ERROR_3);
+ }
+
+ int rc = VINF_SUCCESS;
+ DBGFREGVALTYPE enmRegType = pDesc->enmType;
+ if (pSubField)
+ {
+ unsigned const cBits = pSubField->cBits + pSubField->cShift;
+ if (cBits <= 8)
+ enmRegType = DBGFREGVALTYPE_U8;
+ else if (cBits <= 16)
+ enmRegType = DBGFREGVALTYPE_U16;
+ else if (cBits <= 32)
+ enmRegType = DBGFREGVALTYPE_U32;
+ else if (cBits <= 64)
+ enmRegType = DBGFREGVALTYPE_U64;
+ else
+ enmRegType = DBGFREGVALTYPE_U128;
+ }
+ else if (pLookupRec->pAlias)
+ {
+ /* Restrict the input to the size of the alias register. */
+ DBGFREGVALTYPE enmAliasType = pLookupRec->pAlias->enmType;
+ if (enmAliasType != enmType)
+ {
+ rc = dbgfR3RegValCast(&Value, enmType, enmAliasType);
+ if (RT_FAILURE(rc))
+ return rc;
+ dbgfR3RegValCast(&Mask, enmType, enmAliasType);
+ enmType = enmAliasType;
+ }
+ }
+
+ if (enmType != enmRegType)
+ {
+ int rc2 = dbgfR3RegValCast(&Value, enmType, enmRegType);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
+ rc2 = VINF_SUCCESS;
+ dbgfR3RegValCast(&Mask, enmType, enmRegType);
+ }
+
+ /*
+ * Subfields needs some extra processing if there is no subfield
+ * setter, since we'll be feeding it to the normal register setter
+ * instead. The mask and value must be shifted and truncated to the
+ * subfield position.
+ */
+ if (pSubField && !pSubField->pfnSet)
+ {
+ /* The shift factor is for displaying a subfield value
+ 2**cShift times larger than the stored value. We have
+ to undo this before adjusting value and mask. */
+ if (pSubField->cShift)
+ {
+ /* Warn about truncation of the lower bits that get
+ shifted out below. */
+ if (rc == VINF_SUCCESS)
+ {
+ DBGFREGVAL Value2 = Value;
+ RTUInt128AssignAndNFirstBits(&Value2.u128, -pSubField->cShift);
+ if (!RTUInt128BitAreAllClear(&Value2.u128))
+ rc = VINF_DBGF_TRUNCATED_REGISTER;
+ }
+ RTUInt128AssignShiftRight(&Value.u128, pSubField->cShift);
+ }
+
+ DBGFREGVAL Value3 = Value;
+ RTUInt128AssignAndNFirstBits(&Value.u128, pSubField->cBits);
+ if (rc == VINF_SUCCESS && RTUInt128IsNotEqual(&Value.u128, &Value.u128))
+ rc = VINF_DBGF_TRUNCATED_REGISTER;
+ RTUInt128AssignAndNFirstBits(&Mask.u128, pSubField->cBits);
+
+ RTUInt128AssignShiftLeft(&Value.u128, pSubField->iFirstBit);
+ RTUInt128AssignShiftLeft(&Mask.u128, pSubField->iFirstBit);
+ }
+
+ /*
+ * Do the actual work on an EMT.
+ */
+ if (pSet->enmType == DBGFREGSETTYPE_CPU)
+ idDefCpu = pSet->uUserArg.pVCpu->idCpu;
+ else if (idDefCpu != VMCPUID_ANY)
+ idDefCpu &= ~DBGFREG_HYPER_VMCPUID;
+
+ int rc2 = VMR3ReqPriorityCallWaitU(pUVM, idDefCpu, (PFNRT)dbgfR3RegNmSetWorkerOnCpu, 4,
+ pUVM, pLookupRec, &Value, &Mask);
+
+ if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
+ rc = rc2;
+ return rc;
+ }
+ return VERR_DBGF_READ_ONLY_REGISTER;
+ }
+ return VERR_DBGF_REGISTER_NOT_FOUND;
}
@@ -1922,7 +2166,6 @@ DECLINLINE(ssize_t) dbgfR3RegFormatValueInt(char *pszTmp, size_t cbTmp, PCDBGFRE
}
-
/**
* Format a register value, extended version.
*
@@ -1938,8 +2181,8 @@ DECLINLINE(ssize_t) dbgfR3RegFormatValueInt(char *pszTmp, size_t cbTmp, PCDBGFRE
* ignored.
* @param fFlags String formatting flags, RTSTR_F_XXX.
*/
-VMMDECL(ssize_t) DBGFR3RegFormatValueEx(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType,
- unsigned uBase, signed int cchWidth, signed int cchPrecision, uint32_t fFlags)
+VMMR3DECL(ssize_t) DBGFR3RegFormatValueEx(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType,
+ unsigned uBase, signed int cchWidth, signed int cchPrecision, uint32_t fFlags)
{
/*
* Format to temporary buffer using worker shared with dbgfR3RegPrintfCbFormatNormal.
@@ -1975,7 +2218,7 @@ VMMDECL(ssize_t) DBGFR3RegFormatValueEx(char *pszBuf, size_t cbBuf, PCDBGFREGVAL
* @param enmType The value type.
* @param fSpecial Same as RTSTR_F_SPECIAL.
*/
-VMMDECL(ssize_t) DBGFR3RegFormatValue(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType, bool fSpecial)
+VMMR3DECL(ssize_t) DBGFR3RegFormatValue(char *pszBuf, size_t cbBuf, PCDBGFREGVAL pValue, DBGFREGVALTYPE enmType, bool fSpecial)
{
int cchWidth = 0;
switch (enmType)
@@ -2020,7 +2263,7 @@ dbgfR3RegPrintfCbFormatField(PDBGFR3REGPRINTFARGS pThis, PFNRTSTROUTPUT pfnOutpu
*/
DBGFREGVAL Value;
DBGFREGVALTYPE enmType;
- int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
+ int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pUVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
if (RT_FAILURE(rc))
{
PCRTSTATUSMSG pErr = RTErrGet(rc);
@@ -2113,7 +2356,7 @@ dbgfR3RegPrintfCbFormatNormal(PDBGFR3REGPRINTFARGS pThis, PFNRTSTROUTPUT pfnOutp
*/
DBGFREGVAL Value;
DBGFREGVALTYPE enmType;
- int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
+ int rc = dbgfR3RegNmQueryWorkerOnCpu(pThis->pUVM, pLookupRec, DBGFREGVALTYPE_END, &Value, &enmType);
if (RT_FAILURE(rc))
{
PCRTSTATUSMSG pErr = RTErrGet(rc);
@@ -2176,15 +2419,16 @@ dbgfR3RegPrintfCbFormat(void *pvArg, PFNRTSTROUTPUT pfnOutput, void *pvArgOutput
* Look up the register - same as dbgfR3RegResolve, except for locking and
* input string termination.
*/
+ PRTSTRSPACE pRegSpace = &pThis->pUVM->dbgf.s.RegSpace;
/* Try looking up the name without any case folding or cpu prefixing. */
- PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGetN(&pThis->pVM->dbgf.s.RegSpace, pachReg, cchReg);
+ PCDBGFREGLOOKUP pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGetN(pRegSpace, pachReg, cchReg);
if (!pLookupRec)
{
/* Lower case it and try again. */
char szName[DBGF_REG_MAX_NAME * 4 + 16];
ssize_t cchFolded = dbgfR3RegCopyToLower(pachReg, cchReg, szName, sizeof(szName) - DBGF_REG_MAX_NAME);
if (cchFolded > 0)
- pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(&pThis->pVM->dbgf.s.RegSpace, szName);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
if ( !pLookupRec
&& cchFolded >= 0
&& pThis->idCpu != VMCPUID_ANY)
@@ -2192,7 +2436,7 @@ dbgfR3RegPrintfCbFormat(void *pvArg, PFNRTSTROUTPUT pfnOutput, void *pvArgOutput
/* Prefix it with the specified CPU set. */
size_t cchCpuSet = RTStrPrintf(szName, sizeof(szName), pThis->fGuestRegs ? "cpu%u." : "hypercpu%u.", pThis->idCpu);
dbgfR3RegCopyToLower(pachReg, cchReg, &szName[cchCpuSet], sizeof(szName) - cchCpuSet);
- pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(&pThis->pVM->dbgf.s.RegSpace, szName);
+ pLookupRec = (PCDBGFREGLOOKUP)RTStrSpaceGet(pRegSpace, szName);
}
}
AssertMsgReturn(pLookupRec, ("'%s'\n", pszFormat), 0);
@@ -2269,9 +2513,9 @@ dbgfR3RegPrintfCbOutput(void *pvArg, const char *pachChars, size_t cbChars)
*/
static DECLCALLBACK(int) dbgfR3RegPrintfWorkerOnCpu(PDBGFR3REGPRINTFARGS pArgs)
{
- DBGF_REG_DB_LOCK_READ(pArgs->pVM);
+ DBGF_REG_DB_LOCK_READ(pArgs->pUVM);
RTStrFormatV(dbgfR3RegPrintfCbOutput, pArgs, dbgfR3RegPrintfCbFormat, pArgs, pArgs->pszFormat, pArgs->va);
- DBGF_REG_DB_UNLOCK_READ(pArgs->pVM);
+ DBGF_REG_DB_UNLOCK_READ(pArgs->pUVM);
return pArgs->rc;
}
@@ -2282,7 +2526,7 @@ static DECLCALLBACK(int) dbgfR3RegPrintfWorkerOnCpu(PDBGFR3REGPRINTFARGS pArgs)
* This is restricted to registers from one CPU, that specified by @a idCpu.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The CPU ID of any CPU registers that may be
* printed, pass VMCPUID_ANY if not applicable.
* @param pszBuf The output buffer.
@@ -2291,14 +2535,14 @@ static DECLCALLBACK(int) dbgfR3RegPrintfWorkerOnCpu(PDBGFR3REGPRINTFARGS pArgs)
* %VR{name}, they take no arguments.
* @param va Other format arguments.
*/
-VMMR3DECL(int) DBGFR3RegPrintfV(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, va_list va)
+VMMR3DECL(int) DBGFR3RegPrintfV(PUVM pUVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, va_list va)
{
AssertPtrReturn(pszBuf, VERR_INVALID_POINTER);
AssertReturn(cbBuf > 0, VERR_BUFFER_OVERFLOW);
*pszBuf = '\0';
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- AssertReturn((idCpu & ~DBGFREG_HYPER_VMCPUID) < pVM->cCpus || idCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn((idCpu & ~DBGFREG_HYPER_VMCPUID) < pUVM->cCpus || idCpu == VMCPUID_ANY, VERR_INVALID_CPU_ID);
AssertPtrReturn(pszFormat, VERR_INVALID_POINTER);
/*
@@ -2306,7 +2550,7 @@ VMMR3DECL(int) DBGFR3RegPrintfV(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbB
* specified CPU.
*/
DBGFR3REGPRINTFARGS Args;
- Args.pVM = pVM;
+ Args.pUVM = pUVM;
Args.idCpu = idCpu != VMCPUID_ANY ? idCpu & ~DBGFREG_HYPER_VMCPUID : idCpu;
Args.fGuestRegs = idCpu != VMCPUID_ANY && !(idCpu & DBGFREG_HYPER_VMCPUID);
Args.pszBuf = pszBuf;
@@ -2315,7 +2559,7 @@ VMMR3DECL(int) DBGFR3RegPrintfV(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbB
Args.offBuf = 0;
Args.cchLeftBuf = cbBuf - 1;
Args.rc = VINF_SUCCESS;
- int rc = VMR3ReqPriorityCallWait(pVM, Args.idCpu, (PFNRT)dbgfR3RegPrintfWorkerOnCpu, 1, &Args);
+ int rc = VMR3ReqPriorityCallWaitU(pUVM, Args.idCpu, (PFNRT)dbgfR3RegPrintfWorkerOnCpu, 1, &Args);
va_end(Args.va);
return rc;
}
@@ -2327,7 +2571,7 @@ VMMR3DECL(int) DBGFR3RegPrintfV(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbB
* This is restricted to registers from one CPU, that specified by @a idCpu.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The CPU ID of any CPU registers that may be
* printed, pass VMCPUID_ANY if not applicable.
* @param pszBuf The output buffer.
@@ -2339,11 +2583,11 @@ VMMR3DECL(int) DBGFR3RegPrintfV(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbB
* of these types takes any arguments.
* @param ... Other format arguments.
*/
-VMMR3DECL(int) DBGFR3RegPrintf(PVM pVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, ...)
+VMMR3DECL(int) DBGFR3RegPrintf(PUVM pUVM, VMCPUID idCpu, char *pszBuf, size_t cbBuf, const char *pszFormat, ...)
{
va_list va;
va_start(va, pszFormat);
- int rc = DBGFR3RegPrintfV(pVM, idCpu, pszBuf, cbBuf, pszFormat, va);
+ int rc = DBGFR3RegPrintfV(pUVM, idCpu, pszBuf, cbBuf, pszFormat, va);
va_end(va);
return rc;
}
diff --git a/src/VBox/VMM/VMMR3/DBGFStack.cpp b/src/VBox/VMM/VMMR3/DBGFStack.cpp
index f01a6438..b6244e72 100644
--- a/src/VBox/VMM/VMMR3/DBGFStack.cpp
+++ b/src/VBox/VMM/VMMR3/DBGFStack.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -25,6 +25,7 @@
#include <VBox/vmm/mm.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
@@ -37,9 +38,9 @@
/**
* Read stack memory.
*/
-DECLINLINE(int) dbgfR3Read(PVM pVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
+DECLINLINE(int) dbgfR3Read(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
{
- int rc = DBGFR3MemRead(pVM, idCpu, pSrcAddr, pvBuf, cb);
+ int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
if (RT_FAILURE(rc))
{
/* fallback: byte by byte and zero the ones we fail to read. */
@@ -47,7 +48,7 @@ DECLINLINE(int) dbgfR3Read(PVM pVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pS
for (cbRead = 0; cbRead < cb; cbRead++)
{
DBGFADDRESS Addr = *pSrcAddr;
- rc = DBGFR3MemRead(pVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
+ rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
if (RT_FAILURE(rc))
break;
}
@@ -76,7 +77,7 @@ DECLINLINE(int) dbgfR3Read(PVM pVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pS
* @todo Add AMD64 support (needs teaming up with the module management for
* unwind tables).
*/
-static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME pFrame)
+static int dbgfR3StackWalk(PUVM pUVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME pFrame)
{
/*
* Stop if we got a read error in the previous run.
@@ -135,7 +136,7 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
uArgs.pb = u.pb + cbStackItem + cbRetAddr;
Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
- int rc = dbgfR3Read(pVM, idCpu, u.pv,
+ int rc = dbgfR3Read(pUVM, idCpu, u.pv,
pFrame->fFlags & DBGFSTACKFRAME_FLAGS_ALL_VALID
? &pFrame->AddrReturnFrame
: &pFrame->AddrFrame,
@@ -155,8 +156,9 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
/* Current PC - set by caller, just find symbol & line. */
if (DBGFADDRESS_IS_VALID(&pFrame->AddrPC))
{
- pFrame->pSymPC = DBGFR3AsSymbolByAddrA(pVM, hAs, &pFrame->AddrPC, NULL /*offDisp*/, NULL /*phMod*/);
- pFrame->pLinePC = DBGFR3LineByAddrAlloc(pVM, pFrame->AddrPC.FlatPtr, NULL);
+ pFrame->pSymPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pFrame->AddrPC, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
+ NULL /*poffDisp*/, NULL /*phMod*/);
+ pFrame->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pFrame->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
}
}
else /* 2nd and subsequent steps */
@@ -202,7 +204,7 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
pFrame->AddrReturnPC.off = *uRet.pu16;
}
else
- DBGFR3AddrFromFlat(pVM, &pFrame->AddrReturnPC, *uRet.pu16);
+ DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
break;
case DBGFRETURNTYPE_NEAR32:
if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
@@ -211,7 +213,7 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
pFrame->AddrReturnPC.off = *uRet.pu32;
}
else
- DBGFR3AddrFromFlat(pVM, &pFrame->AddrReturnPC, *uRet.pu32);
+ DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
break;
case DBGFRETURNTYPE_NEAR64:
if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
@@ -220,39 +222,40 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
pFrame->AddrReturnPC.off = *uRet.pu64;
}
else
- DBGFR3AddrFromFlat(pVM, &pFrame->AddrReturnPC, *uRet.pu64);
+ DBGFR3AddrFromFlat(pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
break;
case DBGFRETURNTYPE_FAR16:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
break;
case DBGFRETURNTYPE_FAR32:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
break;
case DBGFRETURNTYPE_FAR64:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
break;
case DBGFRETURNTYPE_IRET16:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
break;
case DBGFRETURNTYPE_IRET32:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
break;
case DBGFRETURNTYPE_IRET32_PRIV:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
break;
case DBGFRETURNTYPE_IRET32_V86:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
break;
case DBGFRETURNTYPE_IRET64:
- DBGFR3AddrFromSelOff(pVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
+ DBGFR3AddrFromSelOff(pUVM, idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
break;
default:
AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
return VERR_INVALID_PARAMETER;
}
- pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pVM, hAs, &pFrame->AddrReturnPC, NULL /*offDisp*/, NULL /*phMod*/);
- pFrame->pLineReturnPC = DBGFR3LineByAddrAlloc(pVM, pFrame->AddrReturnPC.FlatPtr, NULL);
+ pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pFrame->AddrReturnPC, RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
+ NULL /*poffDisp*/, NULL /*phMod*/);
+ pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUVM, hAs, &pFrame->AddrReturnPC, NULL /*poffDisp*/, NULL /*phMod*/);
/*
* Frame bitness flag.
@@ -277,7 +280,7 @@ static int dbgfR3StackWalk(PVM pVM, VMCPUID idCpu, RTDBGAS hAs, PDBGFSTACKFRAME
/**
* Walks the entire stack allocating memory as we walk.
*/
-static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCTXCORE pCtxCore, RTDBGAS hAs,
+static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTXCORE pCtxCore, RTDBGAS hAs,
DBGFCODETYPE enmCodeType,
PCDBGFADDRESS pAddrFrame,
PCDBGFADDRESS pAddrStack,
@@ -286,7 +289,7 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
PCDBGFSTACKFRAME *ppFirstFrame)
{
/* alloc first frame. */
- PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZ(pVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
+ PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
if (!pCur)
return VERR_NO_MEMORY;
@@ -300,9 +303,9 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
if (pAddrPC)
pCur->AddrPC = *pAddrPC;
else if (enmCodeType != DBGFCODETYPE_GUEST)
- DBGFR3AddrFromFlat(pVM, &pCur->AddrPC, pCtxCore->rip);
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtxCore->rip);
else
- rc = DBGFR3AddrFromSelOff(pVM, idCpu, &pCur->AddrPC, pCtxCore->cs.Sel, pCtxCore->rip);
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtxCore->cs.Sel, pCtxCore->rip);
if (RT_SUCCESS(rc))
{
if (enmReturnType == DBGFRETURNTYPE_INVALID)
@@ -328,7 +331,7 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
fAddrMask = UINT64_MAX;
else
{
- PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
+ PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
CPUMMODE CpuMode = CPUMGetGuestMode(pVCpu);
if (CpuMode == CPUMMODE_REAL)
fAddrMask = UINT16_MAX;
@@ -342,16 +345,16 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
if (pAddrStack)
pCur->AddrStack = *pAddrStack;
else if (enmCodeType != DBGFCODETYPE_GUEST)
- DBGFR3AddrFromFlat(pVM, &pCur->AddrStack, pCtxCore->rsp & fAddrMask);
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtxCore->rsp & fAddrMask);
else
- rc = DBGFR3AddrFromSelOff(pVM, idCpu, &pCur->AddrStack, pCtxCore->ss.Sel, pCtxCore->rsp & fAddrMask);
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtxCore->ss.Sel, pCtxCore->rsp & fAddrMask);
if (pAddrFrame)
pCur->AddrFrame = *pAddrFrame;
else if (enmCodeType != DBGFCODETYPE_GUEST)
- DBGFR3AddrFromFlat(pVM, &pCur->AddrFrame, pCtxCore->rbp & fAddrMask);
+ DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtxCore->rbp & fAddrMask);
else if (RT_SUCCESS(rc))
- rc = DBGFR3AddrFromSelOff(pVM, idCpu, &pCur->AddrFrame, pCtxCore->ss.Sel, pCtxCore->rbp & fAddrMask);
+ rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtxCore->ss.Sel, pCtxCore->rbp & fAddrMask);
}
else
pCur->enmReturnType = enmReturnType;
@@ -360,7 +363,7 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
* The first frame.
*/
if (RT_SUCCESS(rc))
- rc = dbgfR3StackWalk(pVM, idCpu, hAs, pCur);
+ rc = dbgfR3StackWalk(pUVM, idCpu, hAs, pCur);
if (RT_FAILURE(rc))
{
DBGFR3StackWalkEnd(pCur);
@@ -374,12 +377,12 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
{
/* try walk. */
- rc = dbgfR3StackWalk(pVM, idCpu, hAs, &Next);
+ rc = dbgfR3StackWalk(pUVM, idCpu, hAs, &Next);
if (RT_FAILURE(rc))
break;
/* add the next frame to the chain. */
- PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAlloc(pVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
+ PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
if (!pNext)
{
DBGFR3StackWalkEnd(pCur);
@@ -414,7 +417,7 @@ static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PVM pVM, VMCPUID idCpu, PCCPUMCT
* Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
* DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
*/
-static int dbgfR3StackWalkBeginCommon(PVM pVM,
+static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
VMCPUID idCpu,
DBGFCODETYPE enmCodeType,
PCDBGFADDRESS pAddrFrame,
@@ -427,14 +430,16 @@ static int dbgfR3StackWalkBeginCommon(PVM pVM,
* Validate parameters.
*/
*ppFirstFrame = NULL;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
if (pAddrFrame)
- AssertReturn(DBGFR3AddrIsValid(pVM, pAddrFrame), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
if (pAddrStack)
- AssertReturn(DBGFR3AddrIsValid(pVM, pAddrStack), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
if (pAddrPC)
- AssertReturn(DBGFR3AddrIsValid(pVM, pAddrPC), VERR_INVALID_PARAMETER);
+ AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
AssertReturn(enmReturnType >= DBGFRETURNTYPE_INVALID && enmReturnType < DBGFRETURNTYPE_END, VERR_INVALID_PARAMETER);
/*
@@ -459,9 +464,9 @@ static int dbgfR3StackWalkBeginCommon(PVM pVM,
default:
AssertFailedReturn(VERR_INVALID_PARAMETER);
}
- return VMR3ReqPriorityCallWait(pVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
- pVM, idCpu, pCtxCore, hAs, enmCodeType,
- pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
+ return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
+ pUVM, idCpu, pCtxCore, hAs, enmCodeType,
+ pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
}
@@ -475,7 +480,7 @@ static int dbgfR3StackWalkBeginCommon(PVM pVM,
* @returns VINF_SUCCESS on success.
* @returns VERR_NO_MEMORY if we're out of memory.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the virtual CPU which stack we want to walk.
* @param enmCodeType Code type
* @param pAddrFrame Frame address to start at. (Optional)
@@ -484,7 +489,7 @@ static int dbgfR3StackWalkBeginCommon(PVM pVM,
* @param enmReturnType The return address type. (Optional)
* @param ppFirstFrame Where to return the pointer to the first info frame.
*/
-VMMR3DECL(int) DBGFR3StackWalkBeginEx(PVM pVM,
+VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
VMCPUID idCpu,
DBGFCODETYPE enmCodeType,
PCDBGFADDRESS pAddrFrame,
@@ -493,7 +498,7 @@ VMMR3DECL(int) DBGFR3StackWalkBeginEx(PVM pVM,
DBGFRETURNTYPE enmReturnType,
PCDBGFSTACKFRAME *ppFirstFrame)
{
- return dbgfR3StackWalkBeginCommon(pVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
+ return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
}
@@ -507,14 +512,14 @@ VMMR3DECL(int) DBGFR3StackWalkBeginEx(PVM pVM,
* @returns VINF_SUCCESS on success.
* @returns VERR_NO_MEMORY if we're out of memory.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu The ID of the virtual CPU which stack we want to walk.
* @param enmCodeType Code type
* @param ppFirstFrame Where to return the pointer to the first info frame.
*/
-VMMR3DECL(int) DBGFR3StackWalkBegin(PVM pVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
+VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
{
- return dbgfR3StackWalkBeginCommon(pVM, idCpu, enmCodeType, NULL, NULL, NULL, DBGFRETURNTYPE_INVALID, ppFirstFrame);
+ return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, DBGFRETURNTYPE_INVALID, ppFirstFrame);
}
/**
@@ -578,8 +583,8 @@ VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
RTDbgSymbolFree(pCur->pSymPC);
RTDbgSymbolFree(pCur->pSymReturnPC);
- DBGFR3LineFree(pCur->pLinePC);
- DBGFR3LineFree(pCur->pLineReturnPC);
+ RTDbgLineFree(pCur->pLinePC);
+ RTDbgLineFree(pCur->pLineReturnPC);
pCur->pNextInternal = NULL;
pCur->pFirstInternal = NULL;
diff --git a/src/VBox/VMM/VMMR3/DBGFSym.cpp b/src/VBox/VMM/VMMR3/DBGFSym.cpp
deleted file mode 100644
index 0ee582cc..00000000
--- a/src/VBox/VMM/VMMR3/DBGFSym.cpp
+++ /dev/null
@@ -1,1131 +0,0 @@
-/* $Id: DBGFSym.cpp $ */
-/** @file
- * DBGF - Debugger Facility, Symbol Management.
- */
-
-/*
- * Copyright (C) 2006-2007 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-
-/*******************************************************************************
-* Header Files *
-*******************************************************************************/
-#define LOG_GROUP LOG_GROUP_DBGF
-#if defined(RT_OS_WINDOWS) && 0 //defined(DEBUG_bird) // enabled this is you want to debug win32 guests, the hypervisor of EFI.
-# include <Windows.h>
-# define _IMAGEHLP64
-# include <DbgHelp.h>
-# define HAVE_DBGHELP /* if doing guest stuff, this can be nice. */
-#endif
-/** @todo Only use DBGHELP for reading modules since it doesn't do all we want (relocations), or is way to slow in some cases (add symbol)! */
-#include <VBox/vmm/dbgf.h>
-#include <VBox/vmm/mm.h>
-#include <VBox/vmm/pdmapi.h>
-#include "DBGFInternal.h"
-#include <VBox/vmm/vm.h>
-#include <VBox/err.h>
-#include <VBox/log.h>
-
-#include <iprt/assert.h>
-#include <iprt/path.h>
-#include <iprt/ctype.h>
-#include <iprt/env.h>
-#include <iprt/param.h>
-#ifndef HAVE_DBGHELP
-# include <iprt/avl.h>
-# include <iprt/string.h>
-#endif
-
-#include <stdio.h> /* for fopen(). */ /** @todo use iprt/stream.h! */
-#include <stdlib.h>
-
-
-/*******************************************************************************
-* Internal Functions *
-*******************************************************************************/
-#ifdef HAVE_DBGHELP
-static DECLCALLBACK(int) dbgfR3EnumModules(PVM pVM, const char *pszFilename, const char *pszName,
- RTUINTPTR ImageBase, size_t cbImage, bool fRC, void *pvArg);
-static int win32Error(PVM pVM);
-#endif
-
-
-/*******************************************************************************
-* Structures and Typedefs *
-*******************************************************************************/
-#ifndef HAVE_DBGHELP
-/* later */
-typedef struct DBGFMOD *PDBGFMOD;
-
-/**
- * Internal representation of a symbol.
- */
-typedef struct DBGFSYM
-{
- /** Node core with the symbol address range. */
- AVLRGCPTRNODECORE Core;
- /** Pointer to the module this symbol is associated with. */
- PDBGFMOD pModule;
- /** Pointer to the next symbol in with this name. */
- struct DBGFSYM *pNext;
- /** Symbol name. */
- char szName[1];
-} DBGFSYM, *PDBGFSYM;
-
-/**
- * Symbol name space node.
- */
-typedef struct DBGFSYMSPACE
-{
- /** Node core with the symbol name.
- * (it's allocated in the same block as this struct) */
- RTSTRSPACECORE Core;
- /** Pointer to the first symbol with this name (LIFO). */
- PDBGFSYM pSym;
-} DBGFSYMSPACE, *PDBGFSYMSPACE;
-
-#endif
-
-
-
-/*******************************************************************************
-* Internal Functions *
-*******************************************************************************/
-#ifndef HAVE_DBGHELP
-
-/**
- * Initializes the symbol tree.
- */
-static int dbgfR3SymbolInit(PVM pVM)
-{
- PDBGFSYM pSym = (PDBGFSYM)MMR3HeapAlloc(pVM, MM_TAG_DBGF_SYMBOL, sizeof(*pSym));
- if (pSym)
- {
- pSym->Core.Key = 0;
- pSym->Core.KeyLast = ~0;
- pSym->pModule = NULL;
- pSym->szName[0] = '\0';
- if (RTAvlrGCPtrInsert(&pVM->dbgf.s.SymbolTree, &pSym->Core))
- return VINF_SUCCESS;
- AssertReleaseMsgFailed(("Failed to insert %RGv-%RGv!\n", pSym->Core.Key, pSym->Core.KeyLast));
- return VERR_INTERNAL_ERROR;
- }
- return VERR_NO_MEMORY;
-}
-
-
-/**
- * Insert a record into the symbol tree.
- */
-static int dbgfR3SymbolInsert(PVM pVM, const char *pszName, RTGCPTR Address, size_t cb, PDBGFMOD pModule)
-{
- /*
- * Make the address space node.
- */
- size_t cchName = strlen(pszName) + 1;
- PDBGFSYM pSym = (PDBGFSYM)MMR3HeapAlloc(pVM, MM_TAG_DBGF_SYMBOL, RT_OFFSETOF(DBGFSYM, szName[cchName]));
- if (pSym)
- {
- pSym->Core.Key = Address;
- pSym->Core.KeyLast = Address + cb;
- pSym->pModule = pModule;
- memcpy(pSym->szName, pszName, cchName);
-
- PDBGFSYM pOld = (PDBGFSYM)RTAvlrGCPtrRangeGet(&pVM->dbgf.s.SymbolTree, (RTGCPTR)Address);
- if (pOld)
- {
- pSym->Core.KeyLast = pOld->Core.KeyLast;
- if (pOld->Core.Key == pSym->Core.Key)
- {
- pOld = (PDBGFSYM)RTAvlrGCPtrRemove(&pVM->dbgf.s.SymbolTree, (RTGCPTR)Address);
- AssertRelease(pOld);
- MMR3HeapFree(pOld);
- }
- else
- pOld->Core.KeyLast = Address - 1;
- if (RTAvlrGCPtrInsert(&pVM->dbgf.s.SymbolTree, &pSym->Core))
- {
- /*
- * Make the name space node.
- */
- PDBGFSYMSPACE pName = (PDBGFSYMSPACE)RTStrSpaceGet(pVM->dbgf.s.pSymbolSpace, pszName);
- if (!pName)
- {
- /* make new symbol space node. */
- pName = (PDBGFSYMSPACE)MMR3HeapAlloc(pVM, MM_TAG_DBGF_SYMBOL, sizeof(*pName) + cchName);
- if (pName)
- {
- pName->Core.pszString = (char *)memcpy(pName + 1, pszName, cchName);
- pName->pSym = pSym;
- if (RTStrSpaceInsert(pVM->dbgf.s.pSymbolSpace, &pName->Core))
- return VINF_SUCCESS;
- }
- else
- return VINF_SUCCESS;
- }
- else
- {
- /* Add to existing symbol name. */
- pSym->pNext = pName->pSym;
- pName->pSym = pSym;
- return VINF_SUCCESS;
- }
- }
- AssertReleaseMsgFailed(("Failed to insert %RGv-%RGv!\n", pSym->Core.Key, pSym->Core.KeyLast));
- }
- else
- AssertMsgFailed(("pOld! %RGv %s\n", pSym->Core.Key, pszName));
- return VERR_INTERNAL_ERROR;
-
- }
- return VERR_NO_MEMORY;
-}
-
-
-/**
- * Get nearest symbol.
- * @returns NULL if no symbol was the for that address.
- */
-static PDBGFSYM dbgfR3SymbolGetAddr(PVM pVM, RTGCPTR Address)
-{
- PDBGFSYM pSym = (PDBGFSYM)RTAvlrGCPtrRangeGet(&pVM->dbgf.s.SymbolTree, Address);
- Assert(pSym);
- if (pSym && pSym->szName[0])
- return pSym;
- return NULL;
-}
-
-
-/**
- * Get first symbol.
- * @returns NULL if no symbol by that name.
- */
-static PDBGFSYM dbgfR3SymbolGetName(PVM pVM, const char *pszSymbol)
-{
- PDBGFSYMSPACE pName = (PDBGFSYMSPACE)RTStrSpaceGet(pVM->dbgf.s.pSymbolSpace, pszSymbol);
- if (pName)
- return pName->pSym;
- return NULL;
-}
-
-#endif
-
-
-/**
- * Strips all kind of spaces from head and tail of a string.
- */
-static char *dbgfR3Strip(char *psz)
-{
- while (*psz && RT_C_IS_SPACE(*psz))
- psz++;
- char *psz2 = strchr(psz, '\0') - 1;
- while (psz2 >= psz && RT_C_IS_SPACE(*psz2))
- *psz2-- = '\0';
- return psz;
-}
-
-
-/**
- * Initialize the debug info for a VM.
- *
- * This will check the CFGM for any symbols or symbol files
- * which needs loading.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-int dbgfR3SymInit(PVM pVM)
-{
- int rc;
-
- /*
- * Initialize the symbol table.
- */
- pVM->dbgf.s.pSymbolSpace = (PRTSTRSPACE)MMR3HeapAllocZ(pVM, MM_TAG_DBGF_SYMBOL, sizeof(*pVM->dbgf.s.pSymbolSpace));
- AssertReturn(pVM->dbgf.s.pSymbolSpace, VERR_NO_MEMORY);
-
-#ifndef HAVE_DBGHELP
- /* modules & lines later */
- rc = dbgfR3SymbolInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- pVM->dbgf.s.fSymInited = true;
-#endif
-
- /** @todo symbol search path setup. */
-
- /*
- * Check if there are 'loadsyms' commands in the configuration.
- */
- PCFGMNODE pNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/DBGF/loadsyms/");
- if (pNode)
- {
- /*
- * Enumerate the commands.
- */
- for (PCFGMNODE pCmdNode = CFGMR3GetFirstChild(pNode);
- pCmdNode;
- pCmdNode = CFGMR3GetNextChild(pCmdNode))
- {
- char szCmdName[128];
- CFGMR3GetName(pCmdNode, &szCmdName[0], sizeof(szCmdName));
-
- /* File */
- char *pszFilename;
- rc = CFGMR3QueryStringAlloc(pCmdNode, "Filename", &pszFilename);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'File' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Delta (optional) */
- RTGCINTPTR offDelta;
- rc = CFGMR3QueryGCPtrS(pNode, "Delta", &offDelta);
- if (rc == VERR_CFGM_VALUE_NOT_FOUND)
- offDelta = 0;
- else
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Delta' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Module (optional) */
- char *pszModule;
- rc = CFGMR3QueryStringAlloc(pCmdNode, "Module", &pszModule);
- if (rc == VERR_CFGM_VALUE_NOT_FOUND)
- pszModule = NULL;
- else
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Module' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Module (optional) */
- RTGCUINTPTR ModuleAddress;
- rc = CFGMR3QueryGCPtrU(pNode, "ModuleAddress", &ModuleAddress);
- if (rc == VERR_CFGM_VALUE_NOT_FOUND)
- ModuleAddress = 0;
- else
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'ModuleAddress' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Image size (optional) */
- RTGCUINTPTR cbModule;
- rc = CFGMR3QueryGCPtrU(pNode, "ModuleSize", &cbModule);
- if (rc == VERR_CFGM_VALUE_NOT_FOUND)
- cbModule = 0;
- else
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'ModuleAddress' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
-
- /*
- * Execute the command.
- */
- rc = DBGFR3ModuleLoad(pVM, pszFilename, offDelta, pszModule, ModuleAddress, cbModule);
- AssertMsgRCReturn(rc, ("pszFilename=%s offDelta=%RGv pszModule=%s ModuleAddress=%RGv cbModule=%RGv\n",
- pszFilename, offDelta, pszModule, ModuleAddress, cbModule), rc);
-
- MMR3HeapFree(pszModule);
- MMR3HeapFree(pszFilename);
- }
- }
-
- /*
- * Check if there are 'loadmap' commands in the configuration.
- */
- pNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/DBGF/loadmap/");
- if (pNode)
- {
- /*
- * Enumerate the commands.
- */
- for (PCFGMNODE pCmdNode = CFGMR3GetFirstChild(pNode);
- pCmdNode;
- pCmdNode = CFGMR3GetNextChild(pCmdNode))
- {
- char szCmdName[128];
- CFGMR3GetName(pCmdNode, &szCmdName[0], sizeof(szCmdName));
-
- /* File */
- char *pszFilename;
- rc = CFGMR3QueryStringAlloc(pCmdNode, "Filename", &pszFilename);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'File' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Address. */
- RTGCPTR GCPtrAddr;
- rc = CFGMR3QueryGCPtrUDef(pNode, "Address", &GCPtrAddr, 0);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Address' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
- DBGFADDRESS ModAddr;
- DBGFR3AddrFromFlat(pVM, &ModAddr, GCPtrAddr);
-
- /* Name (optional) */
- char *pszModName;
- rc = CFGMR3QueryStringAllocDef(pCmdNode, "Name", &pszModName, NULL);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Name' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Subtrahend (optional) */
- RTGCPTR offSubtrahend;
- rc = CFGMR3QueryGCPtrDef(pNode, "Subtrahend", &offSubtrahend, 0);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Subtrahend' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /* Segment (optional) */
- uint32_t iSeg;
- rc = CFGMR3QueryU32Def(pNode, "Segment", &iSeg, UINT32_MAX);
- AssertMsgRCReturn(rc, ("rc=%Rrc querying the 'Segment' attribute of '/DBGF/loadsyms/%s'!\n", rc, szCmdName), rc);
-
- /*
- * Execute the command.
- */
- rc = DBGFR3AsLoadMap(pVM, DBGF_AS_GLOBAL, pszFilename, pszModName, &ModAddr,
- iSeg == UINT32_MAX ? NIL_RTDBGSEGIDX : iSeg, offSubtrahend, 0 /*fFlags*/);
- AssertMsgRCReturn(rc, ("pszFilename=%s pszModName=%s ModAddr=%RGv offSubtrahend=%#x iSeg=%#x\n",
- pszFilename, pszModName, ModAddr.FlatPtr, offSubtrahend, iSeg), rc);
-
- MMR3HeapFree(pszModName);
- MMR3HeapFree(pszFilename);
- }
- }
-
- /*
- * Check if there are any 'symadd' commands in the configuration.
- */
-
- return VINF_SUCCESS;
-}
-
-
-/**
- * We delay certain
- * Initialize the debug info for a VM.
- */
-int dbgfR3SymLazyInit(PVM pVM)
-{
- if (pVM->dbgf.s.fSymInited)
- return VINF_SUCCESS;
-#ifdef HAVE_DBGHELP
- if (SymInitialize(pVM, NULL, FALSE))
- {
- pVM->dbgf.s.fSymInited = true;
- SymSetOptions(SYMOPT_LOAD_LINES | SYMOPT_AUTO_PUBLICS | SYMOPT_ALLOW_ABSOLUTE_SYMBOLS);
-
- /*
- * Enumerate all modules loaded by PDM and add them to the symbol database.
- */
- PDMR3LdrEnumModules(pVM, dbgfR3EnumModules, NULL);
- return VINF_SUCCESS;
- }
- return win32Error(pVM);
-#else
- return VINF_SUCCESS;
-#endif
-}
-
-
-#ifdef HAVE_DBGHELP
-/**
- * Module enumeration callback function.
- *
- * @returns VBox status.
- * Failure will stop the search and return the return code.
- * Warnings will be ignored and not returned.
- * @param pVM Pointer to the VM.
- * @param pszFilename Module filename.
- * @param pszName Module name. (short and unique)
- * @param ImageBase Address where to executable image is loaded.
- * @param cbImage Size of the executable image.
- * @param fRC Set if guest context, clear if host context.
- * @param pvArg User argument.
- */
-static DECLCALLBACK(int) dbgfR3EnumModules(PVM pVM, const char *pszFilename, const char *pszName,
- RTUINTPTR ImageBase, size_t cbImage, bool fRC, void *pvArg)
-{
- DWORD64 LoadedImageBase = SymLoadModule64(pVM, NULL, (char *)(void *)pszFilename,
- (char *)(void *)pszName, ImageBase, (DWORD)cbImage);
- if (!LoadedImageBase)
- Log(("SymLoadModule64(,,%s,,) -> lasterr=%d\n", pszFilename, GetLastError()));
- else
- Log(("Loaded debuginfo for %s - %s %llx\n", pszName, pszFilename, LoadedImageBase));
-
- return VINF_SUCCESS;
-}
-#endif
-
-
-/**
- * Terminate the debug info repository for the specified VM.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- */
-int dbgfR3SymTerm(PVM pVM)
-{
-#ifdef HAVE_DBGHELP
- if (pVM->dbgf.s.fSymInited)
- SymCleanup(pVM);
- pVM->dbgf.s.fSymInited = false;
- return VINF_SUCCESS;
-#else
- pVM->dbgf.s.SymbolTree = 0; /* MM cleans up allocations */
- pVM->dbgf.s.fSymInited = false;
- return VINF_SUCCESS;
-#endif
-}
-
-
-/** Symbol file type.. */
-typedef enum SYMFILETYPE
-{
- SYMFILETYPE_UNKNOWN,
- SYMFILETYPE_LD_MAP,
- SYMFILETYPE_MS_MAP,
- SYMFILETYPE_OBJDUMP,
- SYMFILETYPE_LINUX_SYSTEM_MAP,
- SYMFILETYPE_PDB,
- SYMFILETYPE_DBG,
- SYMFILETYPE_MZ,
- SYMFILETYPE_ELF
-} SYMFILETYPE, *PSYMFILETYPE;
-
-
-
-/**
- * Probe the type of a symbol information file.
- *
- * @returns The file type.
- * @param pFile File handle.
- */
-SYMFILETYPE dbgfR3ModuleProbe(FILE *pFile)
-{
- char szHead[4096];
- size_t cchHead = fread(szHead, 1, sizeof(szHead) - 1, pFile);
- if (cchHead > 0)
- {
- szHead[cchHead] = '\0';
- if (strstr(szHead, "Preferred load address is"))
- return SYMFILETYPE_MS_MAP;
-
- if ( strstr(szHead, "Archive member included because of")
- || strstr(szHead, "Memory Configuration")
- || strstr(szHead, "Linker script and memory map"))
- return SYMFILETYPE_LD_MAP;
-
- if ( RT_C_IS_XDIGIT(szHead[0])
- && RT_C_IS_XDIGIT(szHead[1])
- && RT_C_IS_XDIGIT(szHead[2])
- && RT_C_IS_XDIGIT(szHead[3])
- && RT_C_IS_XDIGIT(szHead[4])
- && RT_C_IS_XDIGIT(szHead[5])
- && RT_C_IS_XDIGIT(szHead[6])
- && RT_C_IS_XDIGIT(szHead[7])
- && szHead[8] == ' '
- && RT_C_IS_ALPHA(szHead[9])
- && szHead[10] == ' '
- && (RT_C_IS_ALPHA(szHead[11]) || szHead[11] == '_' || szHead[11] == '$')
- )
- return SYMFILETYPE_LINUX_SYSTEM_MAP;
-
- if ( RT_C_IS_XDIGIT(szHead[0])
- && RT_C_IS_XDIGIT(szHead[1])
- && RT_C_IS_XDIGIT(szHead[2])
- && RT_C_IS_XDIGIT(szHead[3])
- && RT_C_IS_XDIGIT(szHead[4])
- && RT_C_IS_XDIGIT(szHead[5])
- && RT_C_IS_XDIGIT(szHead[6])
- && RT_C_IS_XDIGIT(szHead[7])
- && RT_C_IS_XDIGIT(szHead[8])
- && RT_C_IS_XDIGIT(szHead[9])
- && RT_C_IS_XDIGIT(szHead[10])
- && RT_C_IS_XDIGIT(szHead[11])
- && RT_C_IS_XDIGIT(szHead[12])
- && RT_C_IS_XDIGIT(szHead[13])
- && RT_C_IS_XDIGIT(szHead[14])
- && RT_C_IS_XDIGIT(szHead[15])
- && szHead[16] == ' '
- && RT_C_IS_ALPHA(szHead[17])
- && szHead[18] == ' '
- && (RT_C_IS_ALPHA(szHead[19]) || szHead[19] == '_' || szHead[19] == '$')
- )
- return SYMFILETYPE_LINUX_SYSTEM_MAP;
-
- if (strstr(szHead, "Microsoft C/C++ MSF") == szHead)
- return SYMFILETYPE_PDB;
-
- if (strstr(szHead, "ELF") == szHead + 1)
- return SYMFILETYPE_ELF;
-
- if ( strstr(szHead, "MZ") == szHead
- || strstr(szHead, "PE") == szHead
- || strstr(szHead, "LE") == szHead
- || strstr(szHead, "LX") == szHead
- || strstr(szHead, "NE") == szHead)
- return SYMFILETYPE_MZ;
-
-
- if (strstr(szHead, "file format"))
- return SYMFILETYPE_OBJDUMP;
- }
-
- return SYMFILETYPE_UNKNOWN;
-}
-
-
-static int dbgfR3LoadLinuxSystemMap(PVM pVM, FILE *pFile, RTGCUINTPTR ModuleAddress, RTGCUINTPTR AddressDelta)
-{
- char szLine[4096];
- while (fgets(szLine, sizeof(szLine), pFile))
- {
- /* parse the line: <address> <type> <name> */
- const char *psz = dbgfR3Strip(szLine);
- char *pszEnd = NULL;
- uint64_t u64Address;
- int rc = RTStrToUInt64Ex(psz, &pszEnd, 16, &u64Address);
- RTGCUINTPTR Address = u64Address;
- if ( RT_SUCCESS(rc)
- && (*pszEnd == ' ' || *pszEnd == '\t')
- && Address == u64Address
- && u64Address != 0
- && u64Address != (RTGCUINTPTR)~0)
- {
- pszEnd++;
- if ( RT_C_IS_ALPHA(*pszEnd)
- && (pszEnd[1] == ' ' || pszEnd[1] == '\t'))
- {
- psz = dbgfR3Strip(pszEnd + 2);
- if (*psz)
- {
- int rc2 = DBGFR3SymbolAdd(pVM, ModuleAddress, Address + AddressDelta, 0, psz);
- if (RT_FAILURE(rc2))
- Log2(("DBGFR3SymbolAdd(,, %RGv, 0, '%s') -> %Rrc\n", Address, psz, rc2));
- }
- }
- }
- }
- return VINF_SUCCESS;
-}
-
-/**
- * Tries to open the file using the image search paths.
- *
- * This is currently a quick hack and the only way to specifying the path is by setting
- * VBOXDBG_IMAGE_PATH in the environment. It uses semicolon as separator everywhere.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pszFilename The name of the file to locate and open.
- * @param pszFound Where to return the actual filename.
- * @param cchFound The buffer size.
- * @param ppFile Where to return the opened file.
- */
-int dbgfR3ModuleLocateAndOpen(PVM pVM, const char *pszFilename, char *pszFound, size_t cchFound, FILE **ppFile)
-{
- NOREF(pVM);
-
- /* Check the filename length. */
- size_t const cchFilename = strlen(pszFilename);
- if (cchFilename >= cchFound)
- return VERR_FILENAME_TOO_LONG;
- const char *pszName = RTPathFilename(pszFilename);
- if (!pszName)
- return VERR_IS_A_DIRECTORY;
- size_t const cchName = strlen(pszName);
-
- /*
- * Try default location first.
- */
- memcpy(pszFound, pszFilename, cchFilename + 1);
- FILE *pFile = *ppFile = fopen(pszFound, "rb");
- if (pFile)
- return VINF_SUCCESS;
-
- /*
- * Walk the search path.
- */
- char *pszFreeMe = RTEnvDupEx(RTENV_DEFAULT, "VBOXDBG_IMAGE_PATH");
- const char *psz = pszFreeMe ? pszFreeMe : ".";
- while (*psz)
- {
- /* Skip leading blanks - no directories with leading spaces, thank you. */
- while (RT_C_IS_BLANK(*psz))
- psz++;
-
- /* Fine the end of this element. */
- const char *pszNext;
- const char *pszEnd = strchr(psz, ';');
- if (!pszEnd)
- pszEnd = pszNext = strchr(psz, '\0');
- else
- pszNext = pszEnd + 1;
- if (pszEnd != psz)
- {
- size_t const cch = pszEnd - psz;
- if (cch + 1 + cchName < cchFound)
- {
- /** @todo RTPathCompose, RTPathComposeN(). This code isn't right
- * for 'E:' on DOS systems. It may also create unwanted double slashes. */
- memcpy(pszFound, psz, cch);
- pszFound[cch] = '/';
- memcpy(pszFound + cch + 1, pszName, cchName + 1);
- *ppFile = pFile = fopen(pszFound, "rb");
- if (pFile)
- {
- RTStrFree(pszFreeMe);
- return VINF_SUCCESS;
- }
- }
-
- /** @todo do a depth search using the specified path. */
- }
-
- /* advance */
- psz = pszNext;
- }
-
- /* not found */
- RTStrFree(pszFreeMe);
- return VERR_OPEN_FAILED;
-}
-
-
-/**
- * Load debug info, optionally related to a specific module.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszFilename Path to the file containing the symbol information.
- * This can be the executable image, a flat symbol file of some kind or stripped debug info.
- * @param AddressDelta The value to add to the loaded symbols.
- * @param pszName Short hand name for the module. If not related to a module specify NULL.
- * @param ModuleAddress Address which the image is loaded at. This will be used to reference the module other places in the api.
- * Ignored when pszName is NULL.
- * @param cbImage Size of the image.
- * Ignored when pszName is NULL.
- */
-VMMR3DECL(int) DBGFR3ModuleLoad(PVM pVM, const char *pszFilename, RTGCUINTPTR AddressDelta, const char *pszName,
- RTGCUINTPTR ModuleAddress, unsigned cbImage)
-{
- NOREF(cbImage);
-
- /*
- * Lazy init.
- */
- if (!pVM->dbgf.s.fSymInited)
- {
- int rc = dbgfR3SymLazyInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- }
-
- /*
- * Open the load file.
- */
- FILE *pFile = NULL;
- char szFoundFile[RTPATH_MAX];
- int rc = dbgfR3ModuleLocateAndOpen(pVM, pszFilename, szFoundFile, sizeof(szFoundFile), &pFile);
- if (pFile)
- {
- /*
- * Probe the file type.
- */
- SYMFILETYPE enmType = dbgfR3ModuleProbe(pFile);
- if (enmType != SYMFILETYPE_UNKNOWN)
- {
- /*
- * Add the module.
- */
- if (pszName)
- {
- #ifdef HAVE_DBGHELP
- /** @todo arg! checkout the inserting of modules and then loading them again.... Or just the module representation.... */
- DWORD64 ImageBase = SymLoadModule64(pVM, NULL, (char *)(void *)szFoundFile, (char *)(void *)pszName, ModuleAddress, cbImage);
- if (!ImageBase)
- ImageBase = SymLoadModule64(pVM, NULL, (char *)(void *)pszName, (char *)(void *)pszName, ModuleAddress, cbImage);
- if (ImageBase)
- {
- AssertMsg(ModuleAddress == 0 || ModuleAddress == ImageBase, ("ModuleAddres=%RGv ImageBase=%llx\n", ModuleAddress, ImageBase));
- ModuleAddress = ImageBase;
- }
- else
- rc = win32Error(pVM);
- #else
- rc = VERR_NOT_IMPLEMENTED;
- #endif
- }
- if (RT_SUCCESS(rc))
- {
- /*
- * Seek to the start of the file.
- */
- rc = fseek(pFile, 0, SEEK_SET);
- Assert(!rc);
-
- /*
- * Process the specific.
- */
- switch (enmType)
- {
- case SYMFILETYPE_LINUX_SYSTEM_MAP:
- rc = dbgfR3LoadLinuxSystemMap(pVM, pFile, ModuleAddress, AddressDelta);
- break;
-
- case SYMFILETYPE_PDB:
- case SYMFILETYPE_DBG:
- case SYMFILETYPE_MZ:
- #ifdef HAVE_DBGHELP
- /* done it all above! */
- break;
- #endif
- case SYMFILETYPE_LD_MAP:
- case SYMFILETYPE_MS_MAP:
- case SYMFILETYPE_OBJDUMP:
- case SYMFILETYPE_ELF:
- rc = VERR_NOT_SUPPORTED;
- break;
-
- default:
- AssertFailed();
- rc = VERR_INTERNAL_ERROR;
- break;
- } /* file switch. */
- } /* module added successfully. */
- } /* format identified */
- else
- rc = VERR_NOT_SUPPORTED;
- /** @todo check for read errors */
- fclose(pFile);
- }
- return rc;
-}
-
-
-/**
- * Interface used by PDMR3LdrRelocate for telling us that a GC module has been relocated.
- *
- * @param pVM Pointer to the VM.
- * @param OldImageBase The old image base.
- * @param NewImageBase The new image base.
- * @param cbImage The image size.
- * @param pszFilename The image filename.
- * @param pszName The module name.
- */
-VMMR3DECL(void) DBGFR3ModuleRelocate(PVM pVM, RTGCUINTPTR OldImageBase, RTGCUINTPTR NewImageBase, RTGCUINTPTR cbImage,
- const char *pszFilename, const char *pszName)
-{
-#ifdef HAVE_DBGHELP
- if (pVM->dbgf.s.fSymInited)
- {
- if (!SymUnloadModule64(pVM, OldImageBase))
- Log(("SymUnloadModule64(,%RGv) failed, lasterr=%d\n", OldImageBase, GetLastError()));
-
- DWORD ImageSize = (DWORD)cbImage; Assert(ImageSize == cbImage);
- DWORD64 LoadedImageBase = SymLoadModule64(pVM, NULL, (char *)(void *)pszFilename, (char *)(void *)pszName, NewImageBase, ImageSize);
- if (!LoadedImageBase)
- Log(("SymLoadModule64(,,%s,,) -> lasterr=%d (relocate)\n", pszFilename, GetLastError()));
- else
- Log(("Reloaded debuginfo for %s - %s %llx\n", pszName, pszFilename, LoadedImageBase));
- }
-#else
- NOREF(pVM); NOREF(OldImageBase); NOREF(NewImageBase); NOREF(cbImage); NOREF(pszFilename); NOREF(pszName);
-#endif
-}
-
-
-/**
- * Adds a symbol to the debug info manager.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param ModuleAddress Module address. Use 0 if no module.
- * @param SymbolAddress Symbol address
- * @param cbSymbol Size of the symbol. Use 0 if info not available.
- * @param pszSymbol Symbol name.
- */
-VMMR3DECL(int) DBGFR3SymbolAdd(PVM pVM, RTGCUINTPTR ModuleAddress, RTGCUINTPTR SymbolAddress, RTUINT cbSymbol,
- const char *pszSymbol)
-{
- /*
- * Validate.
- */
- if (!pszSymbol || !*pszSymbol)
- {
- AssertMsgFailed(("No symbol name!\n"));
- return VERR_INVALID_PARAMETER;
- }
-
- /*
- * Lazy init.
- */
- if (!pVM->dbgf.s.fSymInited)
- {
- int rc = dbgfR3SymLazyInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- }
-
-#ifdef HAVE_DBGHELP
- if (SymAddSymbol(pVM, ModuleAddress, (char *)(void *)pszSymbol, SymbolAddress, cbSymbol, 0))
- return VINF_SUCCESS;
- return win32Error(pVM);
-#else
- NOREF(ModuleAddress); /** @todo module lookup. */
- return dbgfR3SymbolInsert(pVM, pszSymbol, SymbolAddress, cbSymbol, NULL);
-#endif
-}
-
-
-/**
- * Find symbol by address (nearest).
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param Address Address.
- * @param poffDisplacement Where to store the symbol displacement from Address.
- * @param pSymbol Where to store the symbol info.
- */
-VMMR3DECL(int) DBGFR3SymbolByAddr(PVM pVM, RTGCUINTPTR Address, PRTGCINTPTR poffDisplacement, PDBGFSYMBOL pSymbol)
-{
- /*
- * Lazy init.
- */
- if (!pVM->dbgf.s.fSymInited)
- {
- int rc = dbgfR3SymLazyInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- }
-
- /*
- * Look it up.
- */
-#ifdef HAVE_DBGHELP
- char achBuffer[sizeof(IMAGEHLP_SYMBOL64) + DBGF_SYMBOL_NAME_LENGTH * sizeof(TCHAR) + sizeof(ULONG64)];
- PIMAGEHLP_SYMBOL64 pSym = (PIMAGEHLP_SYMBOL64)&achBuffer[0];
- pSym->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- pSym->MaxNameLength = DBGF_SYMBOL_NAME_LENGTH;
-
- if (SymGetSymFromAddr64(pVM, Address, (PDWORD64)poffDisplacement, pSym))
- {
- pSymbol->Value = (RTGCUINTPTR)pSym->Address;
- pSymbol->cb = pSym->Size;
- pSymbol->fFlags = pSym->Flags;
- strcpy(pSymbol->szName, pSym->Name);
- return VINF_SUCCESS;
- }
- //return win32Error(pVM);
-
-#else
-
- PDBGFSYM pSym = dbgfR3SymbolGetAddr(pVM, Address);
- if (pSym)
- {
- pSymbol->Value = pSym->Core.Key;
- pSymbol->cb = pSym->Core.KeyLast - pSym->Core.Key + 1;
- pSymbol->fFlags = 0;
- pSymbol->szName[0] = '\0';
- strncat(pSymbol->szName, pSym->szName, sizeof(pSymbol->szName) - 1);
- if (poffDisplacement)
- *poffDisplacement = Address - pSymbol->Value;
- return VINF_SUCCESS;
- }
-
-#endif
-
- /*
- * Try PDM.
- */
- if (MMHyperIsInsideArea(pVM, Address))
- {
- char szModName[64];
- RTRCPTR RCPtrMod;
- char szNearSym1[260];
- RTRCPTR RCPtrNearSym1;
- char szNearSym2[260];
- RTRCPTR RCPtrNearSym2;
- int rc = PDMR3LdrQueryRCModFromPC(pVM, Address,
- &szModName[0], sizeof(szModName), &RCPtrMod,
- &szNearSym1[0], sizeof(szNearSym1), &RCPtrNearSym1,
- &szNearSym2[0], sizeof(szNearSym2), &RCPtrNearSym2);
- if (RT_SUCCESS(rc) && szNearSym1[0])
- {
- pSymbol->Value = RCPtrNearSym1;
- pSymbol->cb = RCPtrNearSym2 > RCPtrNearSym1 ? RCPtrNearSym2 - RCPtrNearSym1 : 0;
- pSymbol->fFlags = 0;
- pSymbol->szName[0] = '\0';
- strncat(pSymbol->szName, szNearSym1, sizeof(pSymbol->szName) - 1);
- if (poffDisplacement)
- *poffDisplacement = Address - pSymbol->Value;
- return VINF_SUCCESS;
- }
- }
-
- return VERR_SYMBOL_NOT_FOUND;
-}
-
-
-/**
- * Find symbol by name (first).
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszSymbol Symbol name.
- * @param pSymbol Where to store the symbol info.
- */
-VMMR3DECL(int) DBGFR3SymbolByName(PVM pVM, const char *pszSymbol, PDBGFSYMBOL pSymbol)
-{
- /*
- * Lazy init.
- */
- if (!pVM->dbgf.s.fSymInited)
- {
- int rc = dbgfR3SymLazyInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- }
-
- /*
- * Look it up.
- */
-#ifdef HAVE_DBGHELP
- char achBuffer[sizeof(IMAGEHLP_SYMBOL64) + DBGF_SYMBOL_NAME_LENGTH * sizeof(TCHAR) + sizeof(ULONG64)];
- PIMAGEHLP_SYMBOL64 pSym = (PIMAGEHLP_SYMBOL64)&achBuffer[0];
- pSym->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- pSym->MaxNameLength = DBGF_SYMBOL_NAME_LENGTH;
-
- if (SymGetSymFromName64(pVM, (char *)(void *)pszSymbol, pSym))
- {
- pSymbol->Value = (RTGCUINTPTR)pSym->Address;
- pSymbol->cb = pSym->Size;
- pSymbol->fFlags = pSym->Flags;
- strcpy(pSymbol->szName, pSym->Name);
- return VINF_SUCCESS;
- }
- return win32Error(pVM);
-#else
-
- PDBGFSYM pSym = dbgfR3SymbolGetName(pVM, pszSymbol);
- if (pSym)
- {
- pSymbol->Value = pSym->Core.Key;
- pSymbol->cb = pSym->Core.KeyLast - pSym->Core.Key + 1;
- pSymbol->fFlags = 0;
- pSymbol->szName[0] = '\0';
- strncat(pSymbol->szName, pSym->szName, sizeof(pSymbol->szName) - 1);
- return VINF_SUCCESS;
- }
-
- return VERR_SYMBOL_NOT_FOUND;
-#endif
-}
-
-
-/**
- * Find line by address (nearest).
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param Address Address.
- * @param poffDisplacement Where to store the line displacement from Address.
- * @param pLine Where to store the line info.
- */
-VMMR3DECL(int) DBGFR3LineByAddr(PVM pVM, RTGCUINTPTR Address, PRTGCINTPTR poffDisplacement, PDBGFLINE pLine)
-{
- /*
- * Lazy init.
- */
- if (!pVM->dbgf.s.fSymInited)
- {
- int rc = dbgfR3SymLazyInit(pVM);
- if (RT_FAILURE(rc))
- return rc;
- }
-
- /*
- * Look it up.
- */
-#ifdef HAVE_DBGHELP
- IMAGEHLP_LINE64 Line = {0};
- DWORD off = 0;
- Line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
- if (SymGetLineFromAddr64(pVM, Address, &off, &Line))
- {
- if (poffDisplacement)
- *poffDisplacement = (long)off;
- pLine->Address = (RTGCUINTPTR)Line.Address;
- pLine->uLineNo = Line.LineNumber;
- pLine->szFilename[0] = '\0';
- strncat(pLine->szFilename, Line.FileName, sizeof(pLine->szFilename));
- return VINF_SUCCESS;
- }
- return win32Error(pVM);
-#else
- NOREF(pVM); NOREF(Address); NOREF(poffDisplacement); NOREF(pLine);
- return VERR_NOT_IMPLEMENTED;
-#endif
-}
-
-
-/**
- * Duplicates a line.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pLine The line to duplicate.
- */
-static PDBGFLINE dbgfR3LineDup(PVM pVM, PCDBGFLINE pLine)
-{
- size_t cb = strlen(pLine->szFilename) + RT_OFFSETOF(DBGFLINE, szFilename[1]);
- PDBGFLINE pDup = (PDBGFLINE)MMR3HeapAlloc(pVM, MM_TAG_DBGF_LINE_DUP, cb);
- if (pDup)
- memcpy(pDup, pLine, cb);
- return pDup;
-}
-
-
-/**
- * Find line by address (nearest), allocate return buffer.
- *
- * @returns Pointer to the line. Must be freed using DBGFR3LineFree().
- * @returns NULL if the line was not found or if we're out of memory.
- * @param pVM Pointer to the VM.
- * @param Address Address.
- * @param poffDisplacement Where to store the line displacement from Address.
- */
-VMMR3DECL(PDBGFLINE) DBGFR3LineByAddrAlloc(PVM pVM, RTGCUINTPTR Address, PRTGCINTPTR poffDisplacement)
-{
- DBGFLINE Line;
- int rc = DBGFR3LineByAddr(pVM, Address, poffDisplacement, &Line);
- if (RT_FAILURE(rc))
- return NULL;
- return dbgfR3LineDup(pVM, &Line);
-}
-
-
-/**
- * Frees a line returned by DBGFR3LineByAddressAlloc().
- *
- * @param pLine Pointer to the line.
- */
-VMMR3DECL(void) DBGFR3LineFree(PDBGFLINE pLine)
-{
- if (pLine)
- MMR3HeapFree(pLine);
-}
-
-
-#ifdef HAVE_DBGHELP
-
-//static BOOL CALLBACK win32EnumModulesCallback(PSTR ModuleName, DWORD64 BaseOfDll, PVOID UserContext)
-//{
-// Log(("dbg: module: %08llx %s\n", ModuleName, BaseOfDll));
-// return TRUE;
-//}
-
-static int win32Error(PVM pVM)
-{
- int rc = GetLastError();
- Log(("Lasterror=%d\n", rc));
-
- //SymEnumerateModules64(pVM, win32EnumModulesCallback, NULL);
-
- return VERR_GENERAL_FAILURE;
-}
-#endif
-
diff --git a/src/VBox/VMM/VMMR3/EM.cpp b/src/VBox/VMM/VMMR3/EM.cpp
index c8843db1..024cbec1 100644
--- a/src/VBox/VMM/VMMR3/EM.cpp
+++ b/src/VBox/VMM/VMMR3/EM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -21,11 +21,11 @@
* the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
* Interpreted), and keeping the CPU states in sync. The function
* EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
- * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
+ * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
* emR3RemExecute).
*
* The interpreted execution is only used to avoid switching between
- * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
+ * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
* The interpretation is thus implemented as part of EM.
*
* @see grp_em
@@ -41,13 +41,12 @@
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
-#else
-# include <VBox/vmm/iem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
@@ -55,14 +54,11 @@
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
-#ifdef IEM_VERIFICATION_MODE
-# include <VBox/vmm/iem.h>
-#endif
#include "EMInternal.h"
-#include "internal/em.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
@@ -79,7 +75,7 @@
* Defined Constants And Macros *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
-#define EM_NOTIFY_HWACCM
+#define EM_NOTIFY_HM
#endif
@@ -91,7 +87,7 @@ static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, u
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
-static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
+static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
@@ -103,7 +99,7 @@ int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) EMR3Init(PVM pVM)
+VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
LogFlow(("EMR3Init\n"));
/*
@@ -117,12 +113,39 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
* Init the structure.
*/
pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
+ PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
+ PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
+
bool fEnabled;
- int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &fEnabled);
- pVM->fRecompileUser = RT_SUCCESS(rc) ? !fEnabled : false;
- rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &fEnabled);
- pVM->fRecompileSupervisor = RT_SUCCESS(rc) ? !fEnabled : false;
- Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n", pVM->fRecompileUser, pVM->fRecompileSupervisor));
+ int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
+ AssertLogRelRCReturn(rc, rc);
+ pVM->fRecompileUser = !fEnabled;
+
+ rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
+ AssertLogRelRCReturn(rc, rc);
+ pVM->fRecompileSupervisor = !fEnabled;
+
+#ifdef VBOX_WITH_RAW_RING1
+ rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
+ AssertLogRelRCReturn(rc, rc);
+#else
+ pVM->fRawRing1Enabled = false; /* Disabled by default. */
+#endif
+
+ rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
+ AssertLogRelRCReturn(rc, rc);
+ pVM->em.s.fGuruOnTripleFault = !fEnabled;
+ if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
+ {
+ LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
+ pVM->em.s.fGuruOnTripleFault = true;
+ }
+
+ Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
+ pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
#ifdef VBOX_WITH_REM
/*
@@ -147,15 +170,18 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
{
PVMCPU pVCpu = &pVM->aCpus[i];
- pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
-
pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
pVCpu->em.s.enmPrevState = EMSTATE_NONE;
pVCpu->em.s.fForceRAW = false;
pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
- pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
- AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
+#ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
+ {
+ pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
+ AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
+ }
+#endif
/* Force reset of the time slice. */
pVCpu->em.s.u64TimeSliceStart = 0;
@@ -267,6 +293,8 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
+ EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
+ EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
@@ -321,6 +349,8 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
+ EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
+ EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
@@ -356,11 +386,15 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
- EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
- EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
+ EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
+# ifdef VBOX_WITH_FIRST_IEM_STEP
+ EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
+# else
EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
- EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
+# endif
+ EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
+ EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
@@ -388,16 +422,18 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
pVCpu->em.s.pCliStatTree = 0;
/* these should be considered for release statistics. */
- EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
- EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
- EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
- EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
- EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
- EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
- EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
- EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
- EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
- EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
+ EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
+ EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
#endif /* VBOX_WITH_STATISTICS */
@@ -410,6 +446,7 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
}
+ emR3InitDbg(pVM);
return VINF_SUCCESS;
}
@@ -421,7 +458,7 @@ VMMR3DECL(int) EMR3Init(PVM pVM)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) EMR3Relocate(PVM pVM)
+VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
LogFlow(("EMR3Relocate\n"));
for (VMCPUID i = 0; i < pVM->cCpus; i++)
@@ -440,7 +477,7 @@ VMMR3DECL(void) EMR3Relocate(PVM pVM)
*
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
+VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
pVCpu->em.s.fForceRAW = false;
@@ -460,7 +497,7 @@ VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) EMR3Reset(PVM pVM)
+VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
Log(("EMR3Reset: \n"));
for (VMCPUID i = 0; i < pVM->cCpus; i++)
@@ -477,7 +514,7 @@ VMMR3DECL(void) EMR3Reset(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) EMR3Term(PVM pVM)
+VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
@@ -541,9 +578,8 @@ static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, u
/*
* Validate version.
*/
- if ( uVersion != EM_SAVED_STATE_VERSION
- && uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
- && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
+ if ( uVersion > EM_SAVED_STATE_VERSION
+ || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
{
AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
@@ -623,26 +659,31 @@ static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu,
case EMEXECPOLICY_RECOMPILE_RING3:
pVM->fRecompileUser = pArgs->fEnforce;
break;
+ case EMEXECPOLICY_IEM_ALL:
+ pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
+ break;
default:
AssertFailedReturn(VERR_INVALID_PARAMETER);
}
- Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n",
- pVM->fRecompileUser, pVM->fRecompileSupervisor));
+ Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
+ pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
}
/*
- * Force rescheduling if in RAW, HWACCM or REM.
+ * Force rescheduling if in RAW, HM, IEM, or REM.
*/
return pVCpu->em.s.enmState == EMSTATE_RAW
- || pVCpu->em.s.enmState == EMSTATE_HWACC
+ || pVCpu->em.s.enmState == EMSTATE_HM
+ || pVCpu->em.s.enmState == EMSTATE_IEM
|| pVCpu->em.s.enmState == EMSTATE_REM
+ || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
? VINF_EM_RESCHEDULE
: VINF_SUCCESS;
}
/**
- * Changes a the execution scheduling policy.
+ * Changes an execution scheduling policy parameter.
*
* This is used to enable or disable raw-mode / hardware-virtualization
* execution of user and supervisor code.
@@ -651,17 +692,54 @@ static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu,
* @returns VINF_RESCHEDULE if a rescheduling might be required.
* @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param enmPolicy The scheduling policy to change.
* @param fEnforce Whether to enforce the policy or not.
*/
-VMMR3DECL(int) EMR3SetExecutionPolicy(PVM pVM, EMEXECPOLICY enmPolicy, bool fEnforce)
+VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
- return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
+ return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
+}
+
+
+/**
+ * Queries an execution scheduling policy parameter.
+ *
+ * @returns VBox status code
+ * @param pUVM The user mode VM handle.
+ * @param enmPolicy The scheduling policy to query.
+ * @param pfEnforced Where to return the current value.
+ */
+VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
+{
+ AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /* No need to bother EMTs with a query. */
+ switch (enmPolicy)
+ {
+ case EMEXECPOLICY_RECOMPILE_RING0:
+ *pfEnforced = pVM->fRecompileSupervisor;
+ break;
+ case EMEXECPOLICY_RECOMPILE_RING3:
+ *pfEnforced = pVM->fRecompileUser;
+ break;
+ case EMEXECPOLICY_IEM_ALL:
+ *pfEnforced = pVM->em.s.fIemExecutesAll;
+ break;
+ default:
+ AssertFailedReturn(VERR_INTERNAL_ERROR_2);
+ }
+
+ return VINF_SUCCESS;
}
@@ -695,16 +773,20 @@ static const char *emR3GetStateName(EMSTATE enmState)
{
case EMSTATE_NONE: return "EMSTATE_NONE";
case EMSTATE_RAW: return "EMSTATE_RAW";
- case EMSTATE_HWACC: return "EMSTATE_HWACC";
+ case EMSTATE_HM: return "EMSTATE_HM";
+ case EMSTATE_IEM: return "EMSTATE_IEM";
case EMSTATE_REM: return "EMSTATE_REM";
case EMSTATE_HALTED: return "EMSTATE_HALTED";
case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
+ case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
+ case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
+ case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
default: return "Unknown!";
}
}
@@ -719,30 +801,41 @@ static const char *emR3GetStateName(EMSTATE enmState)
* @param pVCpu Pointer to the VMCPU.
* @param rc Current EM VBox status code.
*/
-static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
+static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
for (;;)
{
- Log(("emR3Debug: rc=%Rrc\n", rc));
- const int rcLast = rc;
+ Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
+ const VBOXSTRICTRC rcLast = rc;
/*
* Debug related RC.
*/
- switch (rc)
+ switch (VBOXSTRICTRC_VAL(rc))
{
/*
* Single step an instruction.
*/
case VINF_EM_DBG_STEP:
- if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
- || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
- || pVCpu->em.s.fForceRAW /* paranoia */)
+ if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
+ || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
+ || pVCpu->em.s.fForceRAW /* paranoia */)
+#ifdef VBOX_WITH_RAW_MODE
rc = emR3RawStep(pVM, pVCpu);
+#else
+ AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
+#endif
+ else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
+ rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
+#ifdef VBOX_WITH_REM
+ else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
+ rc = emR3RemStep(pVM, pVCpu);
+#endif
else
{
- Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
- rc = emR3RemStep(pVM, pVCpu);
+ rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
+ if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
+ rc = VINF_EM_DBG_STEPPED;
}
break;
@@ -786,8 +879,11 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
break;
default: /** @todo don't use default for guru, but make special errors code! */
+ {
+ LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
break;
+ }
}
/*
@@ -795,7 +891,7 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
*/
do
{
- switch (rc)
+ switch (VBOXSTRICTRC_VAL(rc))
{
/*
* Continue the debugging loop.
@@ -822,9 +918,13 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
case VINF_EM_HALT:
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
{
+#ifdef VBOX_WITH_RAW_MODE
rc = emR3RawResumeHyper(pVM, pVCpu);
if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
continue;
+#else
+ AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
+#endif
}
if (rc == VINF_SUCCESS)
rc = VINF_EM_RESCHEDULE;
@@ -835,7 +935,7 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
* We'll simply turn the thing off since that's the easiest thing to do.
*/
case VERR_DBGF_NOT_ATTACHED:
- switch (rcLast)
+ switch (VBOXSTRICTRC_VAL(rcLast))
{
case VINF_EM_DBG_HYPER_STEPPED:
case VINF_EM_DBG_HYPER_BREAKPOINT:
@@ -879,13 +979,14 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
* The rest is unexpected, and will keep us here.
*/
default:
- AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
+ AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
break;
}
} while (false);
} /* debug for ever */
}
+
/**
* Steps recompiled code.
*
@@ -897,7 +998,7 @@ static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
*/
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
- LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+ Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#ifdef VBOX_WITH_REM
EMRemLock(pVM);
@@ -917,7 +1018,7 @@ static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif
- LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+ Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
return rc;
}
@@ -972,7 +1073,7 @@ static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
#if defined(VBOX_STRICT) && defined(DEBUG_bird)
- AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
+ AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
|| !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif
@@ -1020,8 +1121,8 @@ static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* We might have missed the raising of VMREQ, TIMER and some other
* important FFs while we were busy switching the state. So, check again.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
{
LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
goto l_REMDoForcedActions;
@@ -1032,7 +1133,7 @@ static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
/*
* Execute REM.
*/
- if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
{
STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
#ifdef VBOX_WITH_REM
@@ -1055,8 +1156,8 @@ static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* Deal with high priority post execution FFs before doing anything
* else. Sync back the state and leave the lock to be on the safe side.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
{
#ifdef VBOX_WITH_REM
fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
@@ -1092,9 +1193,11 @@ static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
#ifdef VBOX_HIGH_RES_TIMERS_HACK
TMTimerPollVoid(pVM, pVCpu);
#endif
- AssertCompile((VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
- if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
+ AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu,
+ VMCPU_FF_ALL_REM_MASK
+ & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
{
l_REMDoForcedActions:
#ifdef VBOX_WITH_REM
@@ -1141,7 +1244,7 @@ int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
for (uint32_t i = 0; i < cIterations; i++)
{
DBGFR3PrgStep(pVCpu);
- DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
emR3RemStep(pVM, pVCpu);
if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
break;
@@ -1156,6 +1259,68 @@ int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
/**
+ * Try execute the problematic code in IEM first, then fall back on REM if there
+ * is too much of it or if IEM doesn't implement something.
+ *
+ * @returns Strict VBox status code from IEMExecLots.
+ * @param pVM The cross context VM structure.
+ * @param pVCpu The cross context CPU structure for the calling EMT.
+ * @param pfFFDone Force flags done indicator.
+ *
+ * @thread EMT(pVCpu)
+ */
+static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+ LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+ *pfFFDone = false;
+
+ /*
+ * Execute in IEM for a while.
+ */
+ while (pVCpu->em.s.cIemThenRemInstructions < 1024)
+ {
+ VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
+ if (rcStrict != VINF_SUCCESS)
+ {
+ if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
+ || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+ break;
+
+ pVCpu->em.s.cIemThenRemInstructions++;
+ Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
+ VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
+ return rcStrict;
+ }
+ pVCpu->em.s.cIemThenRemInstructions++;
+
+ EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
+ if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
+ {
+ LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
+ enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
+ pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
+ pVCpu->em.s.enmState = enmNewState;
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Check for pending actions.
+ */
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Switch to REM.
+ */
+ Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
+ pVCpu->em.s.enmState = EMSTATE_REM;
+ return VINF_SUCCESS;
+}
+
+
+/**
* Decides whether to execute RAW, HWACC or REM.
*
* @returns new EM state
@@ -1165,10 +1330,6 @@ int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
*/
EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
-#ifdef IEM_VERIFICATION_MODE
- return EMSTATE_REM;
-#else
-
/*
* When forcing raw-mode execution, things are simple.
*/
@@ -1181,28 +1342,35 @@ EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
return EMSTATE_WAIT_SIPI;
+ /*
+ * Execute everything in IEM?
+ */
+ if (pVM->em.s.fIemExecutesAll)
+ return EMSTATE_IEM;
+
/* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
/* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
/* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
X86EFLAGS EFlags = pCtx->eflags;
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
{
/*
* Hardware accelerated raw-mode:
- *
- * Typically only 32-bits protected mode, with paging enabled, code is
- * allowed here.
*/
if ( EMIsHwVirtExecutionEnabled(pVM)
- && HWACCMR3CanExecuteGuest(pVM, pCtx))
- return EMSTATE_HWACC;
+ && HMR3CanExecuteGuest(pVM, pCtx))
+ return EMSTATE_HM;
/*
* Note! Raw mode and hw accelerated mode are incompatible. The latter
* turns off monitoring features essential for raw mode!
*/
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+ return EMSTATE_IEM_THEN_REM;
+#else
return EMSTATE_REM;
+#endif
}
/*
@@ -1267,8 +1435,17 @@ EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
if (!EMIsRawRing0Enabled(pVM))
return EMSTATE_REM;
+ if (EMIsRawRing1Enabled(pVM))
+ {
+ /* Only ring 0 and 1 supervisor code. */
+ if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
+ {
+ Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
+ return EMSTATE_REM;
+ }
+ }
/* Only ring 0 supervisor code. */
- if ((uSS & X86_SEL_RPL) != 0)
+ else if ((uSS & X86_SEL_RPL) != 0)
{
Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
return EMSTATE_REM;
@@ -1290,11 +1467,16 @@ EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
return EMSTATE_REM;
}
+# ifdef VBOX_WITH_RAW_MODE
if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
{
Log2(("raw r0 mode forced: patch code\n"));
+# ifdef VBOX_WITH_SAFE_STR
+ Assert(pCtx->tr.Sel);
+# endif
return EMSTATE_RAW;
}
+# endif /* VBOX_WITH_RAW_MODE */
# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
if (!(EFlags.u32 & X86_EFL_IF))
@@ -1305,12 +1487,14 @@ EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
}
# endif
+# ifndef VBOX_WITH_RAW_RING1
/** @todo still necessary??? */
if (EFlags.Bits.u2IOPL != 0)
{
Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
return EMSTATE_REM;
}
+# endif
}
/*
@@ -1347,10 +1531,16 @@ EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
return EMSTATE_REM;
}
+# ifdef VBOX_WITH_SAFE_STR
+ if (pCtx->tr.Sel == 0)
+ {
+ Log(("Raw mode refused -> TR=0\n"));
+ return EMSTATE_REM;
+ }
+# endif
+
/*Assert(PGMPhysIsA20Enabled(pVCpu));*/
return EMSTATE_RAW;
-#endif /* !IEM_VERIFICATION_MODE */
-
}
@@ -1367,13 +1557,39 @@ int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
{
VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
- PDMCritSectFF(pVCpu);
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
+ PDMCritSectBothFF(pVCpu);
+
+ /* Update CR3 (Nested Paging case for HM). */
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+ {
+ int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+ }
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
+ /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
+ {
+ if (CPUMIsGuestInPAEMode(pVCpu))
+ {
+ PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
+ AssertPtr(pPdpes);
+
+ PGMGstUpdatePaePdpes(pVCpu, pPdpes);
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+ }
+ else
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
+ }
+
+#ifdef VBOX_WITH_RAW_MODE
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
CSAMR3DoPendingAction(pVM, pVCpu);
+#endif
- if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
{
if ( rc > VINF_EM_NO_MEMORY
&& rc <= VINF_EM_LAST)
@@ -1423,13 +1639,13 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* Post execution chunk first.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
+ || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
{
/*
* EMT Rendezvous (must be serviced before termination).
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
UPDATE_RC();
@@ -1449,7 +1665,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* State change request (cleared by vmR3SetStateLocked).
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
{
VMSTATE enmState = VMR3GetState(pVM);
switch (enmState)
@@ -1473,7 +1689,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* Debugger Facility polling.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
{
rc2 = DBGFR3VMMForcedAction(pVM);
UPDATE_RC();
@@ -1482,17 +1698,18 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* Postponed reset request.
*/
- if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
+ if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
{
- rc2 = VMR3Reset(pVM);
+ rc2 = VMR3Reset(pVM->pUVM);
UPDATE_RC();
}
+#ifdef VBOX_WITH_RAW_MODE
/*
* CSAM page scanning.
*/
- if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
- && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
+ if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
+ && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
{
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
@@ -1502,11 +1719,12 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
}
+#endif
/*
* Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
{
rc2 = PGMR3PhysAllocateHandyPages(pVM);
UPDATE_RC();
@@ -1516,7 +1734,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/* check that we got them all */
AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
- AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
+ AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
}
/*
@@ -1540,7 +1758,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* EMT Rendezvous (make sure they are handled before the requests).
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
UPDATE_RC();
@@ -1590,7 +1808,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/* Try not to cause deadlocks. */
if ( pVM->cCpus == 1
|| ( !PGMIsLockOwner(pVM)
- && !IOMIsLockOwner(pVM))
+ && !IOMIsLockWriteOwner(pVM))
)
{
EMRemLock(pVM);
@@ -1608,13 +1826,13 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* Normal priority then. (per-VCPU)
* (Executed in no particular order.)
*/
- if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
- && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
+ if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
+ && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
{
/*
* Requests from other threads.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
{
rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
@@ -1645,14 +1863,14 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* High priority pre execution chunk last.
* (Executed in ascending priority order.)
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
{
/*
* Timers before interrupts.
*/
- if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
- && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
+ && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
TMR3TimerQueuesDo(pVM);
/*
@@ -1667,8 +1885,8 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* unlikely, but such timing sensitive problem are not as rare as
* you might think.
*/
- if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
- && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+ && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
{
if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
{
@@ -1683,19 +1901,25 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* Interrupts.
*/
bool fWakeupPending = false;
- if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
- && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
- && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
+ if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
+ && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+ && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
&& !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
+#ifdef VBOX_WITH_RAW_MODE
&& PATMAreInterruptsEnabled(pVM)
- && !HWACCMR3IsEventPending(pVCpu))
+#else
+ && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
+#endif
+ && !HMR3IsEventPending(pVCpu))
{
Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
{
/* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
/** @todo this really isn't nice, should properly handle this */
rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
+ if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
+ rc2 = VINF_EM_RESCHEDULE;
#ifdef VBOX_STRICT
rcIrq = rc2;
#endif
@@ -1707,6 +1931,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
{
+ Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
rc2 = VINF_EM_RESCHEDULE_REM;
UPDATE_RC();
}
@@ -1735,7 +1960,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* EMT Rendezvous (must be serviced before termination).
*/
if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
- && VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
UPDATE_RC();
@@ -1755,7 +1980,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* State change request (cleared by vmR3SetStateLocked).
*/
if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
- && VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
+ && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
{
VMSTATE enmState = VMR3GetState(pVM);
switch (enmState)
@@ -1782,7 +2007,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
* than us since we can terminate without allocating more memory.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
{
rc2 = PGMR3PhysAllocateHandyPages(pVM);
UPDATE_RC();
@@ -1793,14 +2018,14 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/*
* If the virtual sync clock is still stopped, make TM restart it.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
TMR3VirtualSyncFF(pVM, pVCpu);
#ifdef DEBUG
/*
* Debug, pause the VM.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
{
VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
@@ -1810,7 +2035,7 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
/* check that we got them all */
AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
- AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
+ AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
}
#undef UPDATE_RC
@@ -1827,9 +2052,8 @@ int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
* @returns true if allowed, false otherwise
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
- *
*/
-VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
+bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
{
uint64_t u64UserTime, u64KernelTime;
@@ -1871,7 +2095,7 @@ VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
+VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
{
Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
pVM,
@@ -1909,6 +2133,8 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
else
pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
for (;;)
@@ -1922,15 +2148,16 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
* Do forced actions.
*/
if ( !fFFDone
+ && RT_SUCCESS(rc)
&& rc != VINF_EM_TERMINATE
&& rc != VINF_EM_OFF
- && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
+ && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
{
rc = emR3ForcedActions(pVM, pVCpu, rc);
VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
if ( ( rc == VINF_EM_RESCHEDULE_REM
- || rc == VINF_EM_RESCHEDULE_HWACC)
+ || rc == VINF_EM_RESCHEDULE_HM)
&& pVCpu->em.s.fForceRAW)
rc = VINF_EM_RESCHEDULE_RAW;
}
@@ -1955,24 +2182,46 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
*/
case VINF_EM_RESCHEDULE_RAW:
Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
pVCpu->em.s.enmState = EMSTATE_RAW;
break;
/*
* Reschedule - to hardware accelerated raw-mode execution.
*/
- case VINF_EM_RESCHEDULE_HWACC:
- Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", enmOldState, EMSTATE_HWACC));
+ case VINF_EM_RESCHEDULE_HM:
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
Assert(!pVCpu->em.s.fForceRAW);
- pVCpu->em.s.enmState = EMSTATE_HWACC;
+ pVCpu->em.s.enmState = EMSTATE_HM;
break;
/*
* Reschedule - to recompiled execution.
*/
case VINF_EM_RESCHEDULE_REM:
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+ if (HMIsEnabled(pVM))
+ {
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
+ enmOldState, EMSTATE_IEM_THEN_REM));
+ if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
+ {
+ pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
+ pVCpu->em.s.cIemThenRemInstructions = 0;
+ }
+ }
+ else
+ {
+ Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
+ pVCpu->em.s.enmState = EMSTATE_REM;
+ }
+#else
Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
+ Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
pVCpu->em.s.enmState = EMSTATE_REM;
+#endif
break;
/*
@@ -1996,6 +2245,8 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
{
EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
+ if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
+ pVCpu->em.s.cIemThenRemInstructions = 0;
pVCpu->em.s.enmState = enmState;
break;
}
@@ -2038,6 +2289,8 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
{
EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
+ if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
+ pVCpu->em.s.cIemThenRemInstructions = 0;
pVCpu->em.s.enmState = enmState;
}
else
@@ -2098,7 +2351,6 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
* Guest debug events.
*/
case VINF_EM_DBG_STEPPED:
- AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
case VINF_EM_DBG_STOP:
case VINF_EM_DBG_BREAKPOINT:
case VINF_EM_DBG_STEP:
@@ -2107,11 +2359,21 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
}
- else
+ else if (enmOldState == EMSTATE_HM)
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
+ }
+ else if (enmOldState == EMSTATE_REM)
{
Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
}
+ else
+ {
+ Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
+ pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
+ }
break;
/*
@@ -2127,6 +2389,22 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
/*
* Guru mediations.
*/
+ case VINF_EM_TRIPLE_FAULT:
+ if (!pVM->em.s.fGuruOnTripleFault)
+ {
+ Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
+ Assert(pVM->cCpus == 1);
+ REMR3Reset(pVM);
+ PGMR3ResetCpu(pVM, pVCpu);
+ TRPMR3ResetCpu(pVCpu);
+ CPUMR3ResetCpu(pVM, pVCpu);
+ EMR3ResetCpu(pVCpu);
+ HMR3ResetCpu(pVCpu);
+ pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
+ Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d\n", rc, enmOldState, pVCpu->em.s.enmState));
+ break;
+ }
+ /* Else fall through and trigger a guru. */
case VERR_VMM_RING0_ASSERTION:
Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
@@ -2162,10 +2440,12 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
if ( enmOldState == EMSTATE_HALTED
&& (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
&& ( enmNewState == EMSTATE_RAW
- || enmNewState == EMSTATE_HWACC
+ || enmNewState == EMSTATE_HM
|| enmNewState == EMSTATE_REM
+ || enmNewState == EMSTATE_IEM_THEN_REM
|| enmNewState == EMSTATE_DEBUG_GUEST_RAW
- || enmNewState == EMSTATE_DEBUG_GUEST_HWACC
+ || enmNewState == EMSTATE_DEBUG_GUEST_HM
+ || enmNewState == EMSTATE_DEBUG_GUEST_IEM
|| enmNewState == EMSTATE_DEBUG_GUEST_REM) )
{
LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
@@ -2187,39 +2467,66 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
* Execute raw.
*/
case EMSTATE_RAW:
-#ifndef IEM_VERIFICATION_MODE /* remove later */
+#ifdef VBOX_WITH_RAW_MODE
rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
- break;
+#else
+ AssertLogRelMsgFailed(("%Rrc\n", rc));
+ rc = VERR_EM_INTERNAL_ERROR;
#endif
+ break;
/*
* Execute hardware accelerated raw.
*/
- case EMSTATE_HWACC:
-#ifndef IEM_VERIFICATION_MODE /* remove later */
- rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
+ case EMSTATE_HM:
+ rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
break;
-#endif
/*
* Execute recompiled.
*/
case EMSTATE_REM:
-#ifdef IEM_VERIFICATION_MODE
-# if 1
- rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); fFFDone = false;
-# else
- rc = VBOXSTRICTRC_TODO(REMR3EmulateInstruction(pVM, pVCpu)); fFFDone = false;
- if (rc == VINF_EM_RESCHEDULE)
- rc = VINF_SUCCESS;
-# endif
-#else
rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
-#endif
Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
break;
/*
+ * Execute in the interpreter.
+ */
+ case EMSTATE_IEM:
+ {
+#if 0 /* For testing purposes. */
+ STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
+ rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
+ if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
+ rc = VINF_SUCCESS;
+ else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
+#endif
+ rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
+ if (pVM->em.s.fIemExecutesAll)
+ {
+ Assert(rc != VINF_EM_RESCHEDULE_REM);
+ Assert(rc != VINF_EM_RESCHEDULE_RAW);
+ Assert(rc != VINF_EM_RESCHEDULE_HM);
+ }
+ fFFDone = false;
+ break;
+ }
+
+ /*
+ * Execute in IEM, hoping we can quickly switch aback to HM
+ * or RAW execution. If our hopes fail, we go to REM.
+ */
+ case EMSTATE_IEM_THEN_REM:
+ {
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
+ rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
+ break;
+ }
+
+ /*
* Application processor execution halted until SIPI.
*/
case EMSTATE_WAIT_SIPI:
@@ -2230,14 +2537,20 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
case EMSTATE_HALTED:
{
STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
+ /* If HM (or someone else) store a pending interrupt in
+ TRPM, it must be dispatched ASAP without any halting.
+ Anything pending in TRPM has been accepted and the CPU
+ should already be the right state to receive it. */
+ if (TRPMHasTrap(pVCpu))
+ rc = VINF_EM_RESCHEDULE;
/* MWAIT has a special extension where it's woken up when
an interrupt is pending even when IF=0. */
- if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
- == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
+ else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
+ == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
{
rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
if ( rc == VINF_SUCCESS
- && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+ && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
{
Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
rc = VINF_EM_RESCHEDULE;
@@ -2262,12 +2575,14 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
/*
* Debugging in the guest.
*/
- case EMSTATE_DEBUG_GUEST_REM:
case EMSTATE_DEBUG_GUEST_RAW:
+ case EMSTATE_DEBUG_GUEST_HM:
+ case EMSTATE_DEBUG_GUEST_IEM:
+ case EMSTATE_DEBUG_GUEST_REM:
TMR3NotifySuspend(pVM, pVCpu);
- rc = emR3Debug(pVM, pVCpu, rc);
+ rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
TMR3NotifyResume(pVM, pVCpu);
- Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+ Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
break;
/*
@@ -2278,13 +2593,18 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
TMR3NotifySuspend(pVM, pVCpu);
STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
- rc = emR3Debug(pVM, pVCpu, rc);
- Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+ rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
+ Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
if (rc != VINF_SUCCESS)
{
- /* switch to guru meditation mode */
- pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
- VMMR3FatalDump(pVM, pVCpu, rc);
+ if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
+ pVCpu->em.s.enmState = EMSTATE_TERMINATING;
+ else
+ {
+ /* switch to guru meditation mode */
+ pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
+ VMMR3FatalDump(pVM, pVCpu, rc);
+ }
Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
return rc;
}
@@ -2345,7 +2665,7 @@ VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) EMR3NotifySuspend(PVM pVM)
+VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
PVMCPU pVCpu = VMMGetCpu(pVM);
@@ -2360,7 +2680,7 @@ VMMR3DECL(int) EMR3NotifySuspend(PVM pVM)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) EMR3NotifyResume(PVM pVM)
+VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
PVMCPU pVCpu = VMMGetCpu(pVM);
EMSTATE enmCurState = pVCpu->em.s.enmState;
diff --git a/src/VBox/VMM/VMMR3/EMHwaccm.cpp b/src/VBox/VMM/VMMR3/EMHM.cpp
index 35fe45a7..39dd8df2 100644
--- a/src/VBox/VMM/VMMR3/EMHwaccm.cpp
+++ b/src/VBox/VMM/VMMR3/EMHM.cpp
@@ -1,10 +1,10 @@
-/* $Id: EMHwaccm.cpp $ */
+/* $Id: EMHM.cpp $ */
/** @file
* EM - Execution Monitor / Manager - hardware virtualization
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -37,9 +37,8 @@
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include "EMInternal.h"
-#include "internal/em.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
@@ -54,103 +53,105 @@
* Defined Constants And Macros *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
-#define EM_NOTIFY_HWACCM
+#define EM_NOTIFY_HM
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
-DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
-static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
-static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
+static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
+static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-#define EMHANDLERC_WITH_HWACCM
+#define EMHANDLERC_WITH_HM
+#define emR3ExecuteInstruction emR3HmExecuteInstruction
+#define emR3ExecuteIOInstruction emR3HmExecuteIOInstruction
#include "EMHandleRCTmpl.h"
-#if defined(DEBUG) && defined(SOME_UNUSED_FUNCTIONS)
-
/**
- * Steps hardware accelerated mode.
+ * Executes instruction in HM mode if we can.
+ *
+ * This is somewhat comparable to REMR3EmulateInstruction.
+ *
+ * @returns VBox strict status code.
+ * @retval VINF_EM_DBG_STEPPED on success.
+ * @retval VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
+ * HM right now.
*
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pVCpu Pointer to the VMCPU.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param pVCpu Pointer to the cross context CPU structure for
+ * the calling EMT.
+ * @param fFlags Combinations of EM_ONE_INS_FLAGS_XXX.
+ * @thread EMT.
*/
-static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
+VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
- Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
+ PCPUMCTX pCtx = pVCpu->em.s.pCtx;
+ Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
- int rc;
- PCPUMCTX pCtx = pVCpu->em.s.pCtx;
- VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
+ if (!HMR3CanExecuteGuest(pVM, pCtx))
+ return VINF_EM_RESCHEDULE;
- /*
- * Check vital forced actions, but ignore pending interrupts and timers.
- */
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
- {
- rc = emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
- if (rc != VINF_SUCCESS)
- return rc;
- }
- /*
- * Set flags for single stepping.
- */
- CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
-
- /*
- * Single step.
- * We do not start time or anything, if anything we should just do a few nanoseconds.
- */
- do
+ uint64_t const uOldRip = pCtx->rip;
+ for (;;)
{
- rc = VMMR3HwAccRunGC(pVM, pVCpu);
- } while ( rc == VINF_SUCCESS
- || rc == VINF_EM_RAW_INTERRUPT);
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
-
- /*
- * Make sure the trap flag is cleared.
- * (Too bad if the guest is trying to single step too.)
- */
- CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
+ /*
+ * Service necessary FFs before going into HM.
+ */
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ {
+ VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
+ if (rcStrict != VINF_SUCCESS)
+ {
+ Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ return rcStrict;
+ }
+ }
- /*
- * Deal with the return codes.
- */
- rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
- rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
- return rc;
-}
+ /*
+ * Go execute it.
+ */
+ bool fOld = HMSetSingleInstruction(pVCpu, true);
+ VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
+ HMSetSingleInstruction(pVCpu, fOld);
+ LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ /*
+ * Handle high priority FFs and informational status codes. We don't do
+ * normal FF processing the caller or the next call can deal with them.
+ */
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ {
+ rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+ LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
-static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
-{
- int rc = VINF_SUCCESS;
- EMSTATE enmOldState = pVCpu->em.s.enmState;
- pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
+ if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
+ {
+ rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
+ Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ }
- Log(("Single step BEGIN:\n"));
- for (uint32_t i = 0; i < cIterations; i++)
- {
- DBGFR3PrgStep(pVCpu);
- DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
- rc = emR3HwAccStep(pVM, pVCpu);
- if ( rc != VINF_SUCCESS
- || !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
- break;
+ /*
+ * Done?
+ */
+ if ( (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
+ || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
+ || pCtx->rip != uOldRip)
+ {
+ if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
+ rcStrict = VINF_EM_DBG_STEPPED;
+ Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
+ return rcStrict;
+ }
}
- Log(("Single step END: rc=%Rrc\n", rc));
- CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
- pVCpu->em.s.enmState = enmOldState;
- return rc == VINF_SUCCESS ? VINF_EM_RESCHEDULE_REM : rc;
}
-#endif /* DEBUG */
-
/**
* Executes one (or perhaps a few more) instruction(s).
@@ -164,9 +165,9 @@ static int emR3SingleStepExecHwAcc(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
* instruction and prefix the log output with this text.
*/
#ifdef LOG_ENABLED
-static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
+static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
-static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
+static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
@@ -175,79 +176,55 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
int rc;
NOREF(rcRC);
- /*
- *
- * The simple solution is to use the recompiler.
- * The better solution is to disassemble the current instruction and
- * try handle as many as possible without using REM.
- *
- */
-
#ifdef LOG_ENABLED
/*
- * Disassemble the instruction if requested.
+ * Log it.
*/
+ Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
if (pszPrefix)
{
- DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
- DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
+ DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
}
-#endif /* LOG_ENABLED */
+#endif
-#if 0
- /* Try our own instruction emulator before falling back to the recompiler. */
- DISCPUSTATE Cpu;
- rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
- if (RT_SUCCESS(rc))
- {
- switch (Cpu.pCurInstr->uOpcode)
- {
- /* @todo we can do more now */
- case OP_MOV:
- case OP_AND:
- case OP_OR:
- case OP_XOR:
- case OP_POP:
- case OP_INC:
- case OP_DEC:
- case OP_XCHG:
- STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
- rc = EMInterpretInstructionCpuUpdtPC(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0);
- if (RT_SUCCESS(rc))
- {
-#ifdef EM_NOTIFY_HWACCM
- if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
- HWACCMR3NotifyEmulated(pVCpu);
+ /*
+ * Use IEM and fallback on REM if the functionality is missing.
+ * Once IEM gets mature enough, nothing should ever fall back.
+ */
+#if defined(VBOX_WITH_FIRST_IEM_STEP) || !defined(VBOX_WITH_REM)
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
+ rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);
+
+ if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
+ || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
#endif
- STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
- return rc;
- }
- if (rc != VERR_EM_INTERPRETER)
- AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
- STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
- break;
- }
- }
-#endif /* 0 */
- STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
- Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
+ {
#ifdef VBOX_WITH_REM
- EMRemLock(pVM);
- /* Flush the recompiler TLB if the VCPU has changed. */
- if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
- CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
- pVM->em.s.idLastRemCpu = pVCpu->idCpu;
-
- rc = REMR3EmulateInstruction(pVM, pVCpu);
- EMRemUnlock(pVM);
-#else
- rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
-#endif
- STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
+ STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
+# ifndef VBOX_WITH_FIRST_IEM_STEP
+ Log(("EMINS[rem]: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
+//# elif defined(DEBUG_bird)
+// AssertFailed();
+# endif
+ EMRemLock(pVM);
+ /* Flush the recompiler TLB if the VCPU has changed. */
+ if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
+ pVM->em.s.idLastRemCpu = pVCpu->idCpu;
+
+ rc = REMR3EmulateInstruction(pVM, pVCpu);
+ EMRemUnlock(pVM);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
+#else /* !VBOX_WITH_REM */
+ NOREF(pVM);
+#endif /* !VBOX_WITH_REM */
+ }
-#ifdef EM_NOTIFY_HWACCM
- if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
- HWACCMR3NotifyEmulated(pVCpu);
+#ifdef EM_NOTIFY_HM
+ if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
+ HMR3NotifyEmulated(pVCpu);
#endif
return rc;
}
@@ -264,12 +241,12 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
* instruction and prefix the log output with this text.
* @param rcGC GC return code
*/
-DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
+DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
{
#ifdef LOG_ENABLED
- return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
+ return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
#else
- return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
+ return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC);
#endif
}
@@ -280,14 +257,16 @@ DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPre
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
+static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
- /* Try to restart the io instruction that was refused in ring-0. */
- VBOXSTRICTRC rcStrict = HWACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
+ /*
+ * Try to restart the io instruction that was refused in ring-0.
+ */
+ VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
if (IOM_SUCCESS(rcStrict))
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
@@ -297,6 +276,17 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+ /*
+ * Hand it over to the interpreter.
+ */
+ rcStrict = IEMExecOne(pVCpu);
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ return VBOXSTRICTRC_TODO(rcStrict);
+
+#else
/** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
* as io instructions tend to come in packages of more than one
*/
@@ -313,14 +303,14 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_IN:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
- rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretIN(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
case OP_OUT:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
- rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretOUT(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
}
@@ -333,7 +323,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_INSWD:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
- rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretINS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
@@ -341,7 +331,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_OUTSWD:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
- rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretOUTS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
}
@@ -355,6 +345,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
pCtx->rip += Cpu.cbInstr;
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc 1\n", VBOXSTRICTRC_VAL(rcStrict)));
return VBOXSTRICTRC_TODO(rcStrict);
}
@@ -363,6 +354,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
/* The active trap will be dispatched. */
Assert(TRPMHasTrap(pVCpu));
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ LogFlow(("emR3HmExecuteIOInstruction: VINF_SUCCESS 2\n"));
return VINF_SUCCESS;
}
AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
@@ -370,13 +362,17 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
if (RT_FAILURE(rcStrict))
{
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc 3\n", VBOXSTRICTRC_VAL(rcStrict)));
return VBOXSTRICTRC_TODO(rcStrict);
}
AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
}
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
- return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
+ int rc3 = emR3HmExecuteInstruction(pVM, pVCpu, "IO: ");
+ LogFlow(("emR3HmExecuteIOInstruction: %Rrc 4 (rc2=%Rrc, rc3=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict), rc2, rc3));
+ return rc3;
+#endif
}
@@ -391,19 +387,21 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest CPU context.
*/
-static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
/*
* Sync page directory.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
{
Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
- int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (RT_FAILURE(rc))
return rc;
- Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#ifdef VBOX_WITH_RAW_MODE
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#endif
/* Prefetch pages for EIP and ESP. */
/** @todo This is rather expensive. Should investigate if it really helps at all. */
@@ -417,12 +415,14 @@ static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
return rc;
}
- rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (RT_FAILURE(rc))
return rc;
}
/** @todo maybe prefetch the supervisor stack page as well */
- Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#ifdef VBOX_WITH_RAW_MODE
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#endif
}
/*
@@ -442,7 +442,7 @@ static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
* since we ran FFs. The allocate handy pages must for instance always be followed by
* this check.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
return VINF_EM_NO_MEMORY;
return VINF_SUCCESS;
@@ -463,18 +463,18 @@ static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
* @param pfFFDone Where to store an indicator telling whether or not
* FFs were done before returning.
*/
-int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
int rc = VERR_IPE_UNINITIALIZED_STATUS;
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
- LogFlow(("emR3HwAccExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
+ LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
*pfFFDone = false;
- STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
+ STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);
-#ifdef EM_NOTIFY_HWACCM
- HWACCMR3NotifyScheduled(pVCpu);
+#ifdef EM_NOTIFY_HM
+ HMR3NotifyScheduled(pVCpu);
#endif
/*
@@ -482,10 +482,10 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
*/
for (;;)
{
- STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHwAccEntry, a);
+ STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);
/* Check if a forced reschedule is pending. */
- if (HWACCMR3IsRescheduleRequired(pVM, pCtx))
+ if (HMR3IsRescheduleRequired(pVM, pCtx))
{
rc = VINF_EM_RESCHEDULE;
break;
@@ -494,11 +494,13 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
/*
* Process high priority pre-execution raw-mode FFs.
*/
- VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+#ifdef VBOX_WITH_RAW_MODE
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#endif
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
{
- rc = emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
+ rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
if (rc != VINF_SUCCESS)
break;
}
@@ -535,13 +537,13 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
/*
* Execute the code.
*/
- STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);
+ STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);
- if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
{
- STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);
- rc = VMMR3HwAccRunGC(pVM, pVCpu);
- STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);
+ STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
+ rc = VMMR3HmRunGC(pVM, pVCpu);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
}
else
{
@@ -557,8 +559,8 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* Deal with high priority post execution FFs before doing anything else.
*/
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
/*
@@ -567,7 +569,7 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
break;
- rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
+ rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
if (rc != VINF_SUCCESS)
break;
@@ -577,13 +579,13 @@ int emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
#ifdef VBOX_HIGH_RES_TIMERS_HACK
TMTimerPollVoid(pVM, pVCpu);
#endif
- if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
{
rc = emR3ForcedActions(pVM, pVCpu, rc);
VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
if ( rc != VINF_SUCCESS
- && rc != VINF_EM_RESCHEDULE_HWACC)
+ && rc != VINF_EM_RESCHEDULE_HM)
{
*pfFFDone = true;
break;
diff --git a/src/VBox/VMM/VMMR3/EMR3Dbg.cpp b/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
new file mode 100644
index 00000000..458ca6fd
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
@@ -0,0 +1,74 @@
+/* $Id: EMR3Dbg.cpp $ */
+/** @file
+ * EM - Execution Monitor / Manager, Debugger Related Bits.
+ */
+
+/*
+ * Copyright (C) 2006-2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_EM
+#include <VBox/vmm/em.h>
+#include <VBox/dbg.h>
+#include "EMInternal.h"
+
+
+/** @callback_method_impl{FNDBGCCMD,
+ * Implements the '.alliem' command. }
+ */
+static DECLCALLBACK(int) enmR3DbgCmdAllIem(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
+ int rc;
+ bool f;
+
+ if (cArgs == 0)
+ {
+ rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "EMR3QueryExecutionPolicy(,EMEXECPOLICY_IEM_ALL,");
+ DBGCCmdHlpPrintf(pCmdHlp, f ? "alliem: enabled\n" : "alliem: disabled\n");
+ }
+ else
+ {
+ rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
+ rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, f);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "EMR3SetExecutionPolicy(,EMEXECPOLICY_IEM_ALL,%RTbool)", f);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/** Describes a optional boolean argument. */
+static DBGCVARDESC const g_BoolArg = { 0, 1, DBGCVAR_CAT_ANY, 0, "boolean", "Boolean value." };
+
+/** Commands. */
+static DBGCCMD const g_aCmds[] =
+{
+ {
+ "alliem", 0, 1, &g_BoolArg, 1, 0, enmR3DbgCmdAllIem, "[boolean]",
+ "Enables or disabled executing ALL code in IEM, if no arguments are given it displays the current status."
+ },
+};
+
+
+int emR3InitDbg(PVM pVM)
+{
+ int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
+ AssertLogRelRC(rc);
+ return rc;
+}
+
diff --git a/src/VBox/VMM/VMMR3/EMRaw.cpp b/src/VBox/VMM/VMMR3/EMRaw.cpp
index 3dec3141..a9c4813d 100644
--- a/src/VBox/VMM/VMMR3/EMRaw.cpp
+++ b/src/VBox/VMM/VMMR3/EMRaw.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -41,7 +41,6 @@
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
-#include "internal/em.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
@@ -59,15 +58,17 @@
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
-static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
-static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
-static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
-static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu);
-static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
-static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu);
+static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
+static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
+static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
+static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu);
+static int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
+static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu);
#define EMHANDLERC_WITH_PATM
+#define emR3ExecuteInstruction emR3RawExecuteInstruction
+#define emR3ExecuteIOInstruction emR3RawExecuteIOInstruction
#include "EMHandleRCTmpl.h"
@@ -127,11 +128,11 @@ int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu)
/*
* Resume execution.
*/
- CPUMR3RawEnter(pVCpu, NULL);
+ CPUMRawEnter(pVCpu, NULL);
CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
rc = VMMR3ResumeHyper(pVM, pVCpu);
Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags, rc));
- rc = CPUMR3RawLeave(pVCpu, NULL, rc);
+ rc = CPUMRawLeave(pVCpu, NULL, rc);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
/*
@@ -159,7 +160,7 @@ int emR3RawStep(PVM pVM, PVMCPU pVCpu)
int rc;
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
#endif
@@ -168,8 +169,8 @@ int emR3RawStep(PVM pVM, PVMCPU pVCpu)
/*
* Check vital forced actions, but ignore pending interrupts and timers.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
{
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
@@ -189,20 +190,20 @@ int emR3RawStep(PVM pVM, PVMCPU pVCpu)
* Single step.
* We do not start time or anything, if anything we should just do a few nanoseconds.
*/
- CPUMR3RawEnter(pVCpu, NULL);
+ CPUMRawEnter(pVCpu, NULL);
do
{
if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
rc = VMMR3ResumeHyper(pVM, pVCpu);
else
rc = VMMR3RawRunGC(pVM, pVCpu);
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
#endif
} while ( rc == VINF_SUCCESS
|| rc == VINF_EM_RAW_INTERRUPT);
- rc = CPUMR3RawLeave(pVCpu, NULL, rc);
+ rc = CPUMRawLeave(pVCpu, NULL, rc);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
/*
@@ -237,9 +238,10 @@ int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
for (uint32_t i = 0; i < cIterations; i++)
{
DBGFR3PrgStep(pVCpu);
- DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
rc = emR3RawStep(pVM, pVCpu);
- if (rc != VINF_SUCCESS)
+ if ( rc != VINF_SUCCESS
+ && rc != VINF_EM_DBG_STEPPED)
break;
}
Log(("Single step END: rc=%Rrc\n", rc));
@@ -263,30 +265,22 @@ int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
* instruction and prefix the log output with this text.
*/
#ifdef LOG_ENABLED
-static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
+static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
#else
-static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
+static int emR3RawExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
#endif
{
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
int rc;
- /*
- *
- * The simple solution is to use the recompiler.
- * The better solution is to disassemble the current instruction and
- * try handle as many as possible without using REM.
- *
- */
-
#ifdef LOG_ENABLED
/*
* Disassemble the instruction if requested.
*/
if (pszPrefix)
{
- DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
- DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
+ DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
}
#endif /* LOG_ENABLED */
@@ -298,10 +292,10 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
*/
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
{
- Log(("emR3ExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
+ Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
- RTGCPTR pNewEip;
- rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
+ RTGCPTR uNewEip;
+ rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &uNewEip);
switch (rc)
{
/*
@@ -309,9 +303,9 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
* mode; just execute the whole block until IF is set again.
*/
case VINF_SUCCESS:
- Log(("emR3ExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
- pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
- pCtx->eip = pNewEip;
+ Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
+ uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+ pCtx->eip = uNewEip;
Assert(pCtx->eip);
if (pCtx->eflags.Bits.u1IF)
@@ -320,12 +314,12 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
*/
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
}
else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
{
/* special case: iret, that sets IF, detected a pending irq/event */
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIRET");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIRET");
}
return VINF_EM_RESCHEDULE_REM;
@@ -333,25 +327,25 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
* One instruction.
*/
case VINF_PATCH_EMULATE_INSTR:
- Log(("emR3ExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
- pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
- pCtx->eip = pNewEip;
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
+ Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
+ uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+ pCtx->eip = uNewEip;
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
/*
* The patch was disabled, hand it to the REM.
*/
case VERR_PATCH_DISABLED:
- Log(("emR3ExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
- pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
- pCtx->eip = pNewEip;
+ Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
+ uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+ pCtx->eip = uNewEip;
if (pCtx->eflags.Bits.u1IF)
{
/*
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
*/
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
}
return VINF_EM_RESCHEDULE_REM;
@@ -365,22 +359,52 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
}
}
- STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
+
+ /*
+ * Use IEM and fallback on REM if the functionality is missing.
+ * Once IEM gets mature enough, nothing should ever fall back.
+ */
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+//# define VBOX_WITH_FIRST_IEM_STEP_B
+#endif
+#if defined(VBOX_WITH_FIRST_IEM_STEP_B) || !defined(VBOX_WITH_REM)
Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
-#ifdef VBOX_WITH_REM
- EMRemLock(pVM);
- /* Flush the recompiler TLB if the VCPU has changed. */
- if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
- CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
- pVM->em.s.idLastRemCpu = pVCpu->idCpu;
-
- rc = REMR3EmulateInstruction(pVM, pVCpu);
- EMRemUnlock(pVM);
-#else
+ STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);
+ if (RT_SUCCESS(rc))
+ {
+ if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
+ rc = VINF_EM_RESCHEDULE;
+# ifdef DEBUG_bird
+ else
+ AssertMsgFailed(("%Rrc\n", rc));
+# endif
+ }
+ else if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
+ || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
#endif
- STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
-
+ {
+#ifdef VBOX_WITH_REM
+ STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
+# ifndef VBOX_WITH_FIRST_IEM_STEP_B
+ Log(("EMINS[rem]: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
+//# elif defined(DEBUG_bird)
+// AssertFailed();
+# endif
+ EMRemLock(pVM);
+ /* Flush the recompiler TLB if the VCPU has changed. */
+ if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
+ pVM->em.s.idLastRemCpu = pVCpu->idCpu;
+
+ rc = REMR3EmulateInstruction(pVM, pVCpu);
+ EMRemUnlock(pVM);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
+#else /* !VBOX_WITH_REM */
+ NOREF(pVM);
+#endif /* !VBOX_WITH_REM */
+ }
return rc;
}
@@ -396,12 +420,12 @@ static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
* instruction and prefix the log output with this text.
* @param rcGC GC return code
*/
-DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
+DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
{
#ifdef LOG_ENABLED
- return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
+ return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
#else
- return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
+ return emR3RawExecuteInstructionWorker(pVM, pVCpu, rcGC);
#endif
}
@@ -412,8 +436,19 @@ DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPre
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
+static int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
+#ifdef VBOX_WITH_FIRST_IEM_STEP
+ STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
+
+ /* Hand it over to the interpreter. */
+ VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
+ LogFlow(("emR3RawExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+ STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
+ STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
+ return VBOXSTRICTRC_TODO(rcStrict);
+
+#else
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
@@ -434,14 +469,14 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_IN:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
- rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretIN(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
case OP_OUT:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
- rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretOUT(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
}
@@ -454,7 +489,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_INSWD:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
- rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretINS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
@@ -462,7 +497,7 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
case OP_OUTSWD:
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
- rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
+ rcStrict = IOMInterpretOUTS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
break;
}
}
@@ -495,7 +530,8 @@ static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
}
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
- return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "IO: ");
+#endif
}
@@ -517,7 +553,7 @@ static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
TRPMEVENT enmType;
RTGCUINT uErrorCode;
RTGCUINTPTR uCR2;
- int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
+ int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2, NULL /* pu8InstrLen */);
if (RT_FAILURE(rc))
{
AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
@@ -538,7 +574,7 @@ static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
&& PATMIsPatchGCAddr(pVM, pCtx->eip))
{
LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
- return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
+ return emR3RawPatchTrap(pVM, pVCpu, pCtx, rc);
}
#endif
@@ -607,7 +643,7 @@ static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR));
if (RT_SUCCESS(rc))
return rc;
- return emR3ExecuteInstruction(pVM, pVCpu, "Monitor: ");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "Monitor: ");
}
}
}
@@ -629,13 +665,13 @@ static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
*/
rc = TRPMResetTrap(pVCpu);
AssertRC(rc);
- return emR3ExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
}
}
#ifdef LOG_ENABLED
- DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
- DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "Guest trap");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Guest trap");
/* Get guest page information. */
uint64_t fFlags = 0;
@@ -687,7 +723,7 @@ static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0);
if (RT_SUCCESS(rc))
{
- DBGFR3DisasInstrCurrentLog(pVCpu, "Patched sysenter instruction");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Patched sysenter instruction");
return VINF_EM_RESCHEDULE_RAW;
}
}
@@ -715,7 +751,7 @@ static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
AssertRC(rc);
/* go to the REM to emulate a single instruction */
- return emR3ExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
}
@@ -728,7 +764,7 @@ static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
* @param pCtx Pointer to the guest CPU context.
* @param gcret GC return code.
*/
-static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
+static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
{
uint8_t u8TrapNo;
int rc;
@@ -753,10 +789,10 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
}
else
{
- rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
+ rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2, NULL /* pu8InstrLen */);
if (RT_FAILURE(rc))
{
- AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
+ AssertReleaseMsgFailed(("emR3RawPatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
return rc;
}
/* Reset the trap as we'll execute the original instruction again. */
@@ -770,8 +806,8 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
if (u8TrapNo != 1)
{
#ifdef LOG_ENABLED
- DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
- DBGFR3DisasInstrCurrentLog(pVCpu, "Patch code");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "Trap in patch code");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Patch code");
DISCPUSTATE Cpu;
rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
@@ -815,11 +851,11 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
}
}
#endif /* LOG_ENABLED */
- Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
+ Log(("emR3RawPatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
- RTGCPTR pNewEip;
- rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
+ RTGCPTR uNewEip;
+ rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &uNewEip);
switch (rc)
{
/*
@@ -828,11 +864,11 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
case VINF_SUCCESS:
{
/** @todo execute a whole block */
- Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
+ Log(("emR3RawPatchTrap: Executing faulting instruction at new address %RGv\n", uNewEip));
if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
- Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
+ Log(("emR3RawPatchTrap: Virtual IF flag disabled!!\n"));
- pCtx->eip = pNewEip;
+ pCtx->eip = uNewEip;
AssertRelease(pCtx->eip);
if (pCtx->eflags.Bits.u1IF)
@@ -851,7 +887,7 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
/** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
/* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
/* Interrupts are enabled; just go back to the original instruction.
return VINF_SUCCESS; */
}
@@ -862,19 +898,19 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
* One instruction.
*/
case VINF_PATCH_EMULATE_INSTR:
- Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
- pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
- pCtx->eip = pNewEip;
+ Log(("emR3RawPatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
+ uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+ pCtx->eip = uNewEip;
AssertRelease(pCtx->eip);
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
/*
* The patch was disabled, hand it to the REM.
*/
case VERR_PATCH_DISABLED:
if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
- Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
- pCtx->eip = pNewEip;
+ Log(("emR3RawPatchTrap: Virtual IF flag disabled!!\n"));
+ pCtx->eip = uNewEip;
AssertRelease(pCtx->eip);
if (pCtx->eflags.Bits.u1IF)
@@ -883,7 +919,7 @@ static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
* The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
*/
Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
- return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
}
return VINF_EM_RESCHEDULE_REM;
@@ -924,7 +960,7 @@ static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
{
#ifdef LOG_ENABLED
- DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
#endif
AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08x\n", pCtx->eip));
return VERR_EM_RAW_PATCH_CONFLICT;
@@ -938,9 +974,9 @@ static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
if (RT_SUCCESS(rc))
{
#ifdef LOG_ENABLED
- DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
#endif
- DBGFR3DisasInstrCurrentLog(pVCpu, "Patched privileged instruction");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Patched privileged instruction");
return VINF_SUCCESS;
}
}
@@ -949,8 +985,8 @@ static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
#ifdef LOG_ENABLED
if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
{
- DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
- DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
}
#endif
@@ -1089,8 +1125,8 @@ static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
#ifdef LOG_ENABLED
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
{
- DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
- DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
}
#endif
@@ -1140,9 +1176,9 @@ static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
}
if (PATMIsPatchGCAddr(pVM, pCtx->eip))
- return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
+ return emR3RawPatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
- return emR3ExecuteInstruction(pVM, pVCpu, "PRIV");
+ return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
}
@@ -1193,7 +1229,7 @@ int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
+VMMR3_INT_DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
{
int rc = emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
@@ -1223,7 +1259,7 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
/*
* Sync selector tables.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
{
VBOXSTRICTRC rcStrict = SELMR3UpdateFromCPUM(pVM, pVCpu);
if (rcStrict != VINF_SUCCESS)
@@ -1237,13 +1273,13 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
* and PGMShwModifyPage, so we're in for trouble if for instance a
* PGMSyncCR3+pgmR3PoolClearAll is pending.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
{
- if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
+ if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
&& EMIsRawRing0Enabled(pVM)
&& CSAMIsEnabled(pVM))
{
- int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (RT_FAILURE(rc))
return rc;
}
@@ -1256,7 +1292,7 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
/*
* Sync TSS.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
{
int rc = SELMR3SyncTSS(pVM, pVCpu);
if (RT_FAILURE(rc))
@@ -1266,14 +1302,14 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
/*
* Sync page directory.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
{
Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
- int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (RT_FAILURE(rc))
return rc;
- Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
/* Prefetch pages for EIP and ESP. */
/** @todo This is rather expensive. Should investigate if it really helps at all. */
@@ -1287,12 +1323,12 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
return rc;
}
- rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (RT_FAILURE(rc))
return rc;
}
/** @todo maybe prefetch the supervisor stack page as well */
- Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
}
/*
@@ -1312,7 +1348,7 @@ static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
* since we ran FFs. The allocate handy pages must for instance always be followed by
* this check.
*/
- if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
return VINF_EM_NO_MEMORY;
return VINF_SUCCESS;
@@ -1361,11 +1397,12 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
# ifdef VBOX_WITH_REM
Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
# endif
- Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0);
+ Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0
+ || (EMIsRawRing1Enabled(pVM) && (pCtx->ss.Sel & X86_SEL_RPL) == 1));
AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
|| PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
- if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
+ if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
&& PGMMapHasConflicts(pVM))
{
PGMMapCheck(pVM);
@@ -1377,8 +1414,8 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
/*
* Process high priority pre-execution raw-mode FFs.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
{
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
@@ -1391,7 +1428,7 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* be modified a bit and some of the state components (IF, SS/CS RPL,
* and perhaps EIP) needs to be stored with PATM.
*/
- rc = CPUMR3RawEnter(pVCpu, NULL);
+ rc = CPUMRawEnter(pVCpu, NULL);
if (rc != VINF_SUCCESS)
{
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
@@ -1408,14 +1445,14 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
{
rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
if (rc != VINF_SUCCESS)
{
- rc = CPUMR3RawLeave(pVCpu, NULL, rc);
+ rc = CPUMRawLeave(pVCpu, NULL, rc);
break;
}
}
@@ -1429,11 +1466,15 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
if (pCtx->eflags.Bits.u1VM)
Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1)
- Log(("RR0: %08x ESP=%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
- pCtx->eip, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
+ Log(("RR0: %x:%08x ESP=%x:%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
+ pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip)));
+# ifdef VBOX_WITH_RAW_RING1
+ else if ((pCtx->ss.Sel & X86_SEL_RPL) == 2)
+ Log(("RR1: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x CPL=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, (pCtx->ss.Sel & X86_SEL_RPL)));
+# endif
else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3)
- Log(("RR3: %08x ESP=%08x IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+ Log(("RR3: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
#endif /* LOG_ENABLED */
@@ -1442,7 +1483,7 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* Execute the code.
*/
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
- if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
+ if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
{
STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
VBOXVMM_EM_RAW_RUN_PRE(pVCpu, pCtx);
@@ -1471,17 +1512,17 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
* Restore the real CPU state and deal with high priority post
* execution FFs before doing anything else.
*/
- rc = CPUMR3RawLeave(pVCpu, NULL, rc);
+ rc = CPUMRawLeave(pVCpu, NULL, rc);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
- if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
#ifdef VBOX_STRICT
/*
* Assert TSS consistency & rc vs patch code.
*/
- if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
+ if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
&& EMIsRawRing0Enabled(pVM))
SELMR3CheckTSS(pVM);
switch (rc)
@@ -1505,7 +1546,7 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
/*
* Let's go paranoid!
*/
- if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
+ if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
&& PGMMapHasConflicts(pVM))
{
PGMMapCheck(pVM);
@@ -1540,10 +1581,10 @@ int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
TMTimerPollVoid(pVM, pVCpu);
#endif
STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
- if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
- || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+ if ( VM_FF_IS_PENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
+ || VMCPU_FF_IS_PENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
{
- Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) != 1);
+ Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) != (EMIsRawRing1Enabled(pVM) ? 2 : 1));
STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
rc = emR3ForcedActions(pVM, pVCpu, rc);
diff --git a/src/VBox/VMM/VMMR3/FTM.cpp b/src/VBox/VMM/VMMR3/FTM.cpp
index ef9e468a..2318f910 100644
--- a/src/VBox/VMM/VMMR3/FTM.cpp
+++ b/src/VBox/VMM/VMMR3/FTM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2010 Oracle Corporation
+ * Copyright (C) 2010-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -20,15 +20,18 @@
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_FTM
+#include <VBox/vmm/ftm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/vmm.h>
#include "FTMInternal.h"
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/param.h>
-#include <VBox/vmm/ssm.h>
#include <VBox/log.h>
-#include <VBox/vmm/pgm.h>
-#include <VBox/vmm/pdm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
@@ -39,10 +42,9 @@
#include <iprt/semaphore.h>
#include <iprt/asm.h>
-#include "internal/vm.h"
-#include "internal/em.h"
#include "internal/pgm.h"
+
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
@@ -100,7 +102,7 @@ static DECLCALLBACK(int) ftmR3PageTreeDestroyCallback(PAVLGCPHYSNODECORE pBaseNo
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) FTMR3Init(PVM pVM)
+VMMR3_INT_DECL(int) FTMR3Init(PVM pVM)
{
/*
* Assert alignment and sizes.
@@ -156,7 +158,7 @@ VMMR3DECL(int) FTMR3Init(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) FTMR3Term(PVM pVM)
+VMMR3_INT_DECL(int) FTMR3Term(PVM pVM)
{
if (pVM->ftm.s.hShutdownEvent != NIL_RTSEMEVENT)
{
@@ -195,7 +197,7 @@ VMMR3DECL(int) FTMR3Term(PVM pVM)
static int ftmR3TcpWriteACK(PVM pVM)
{
- int rc = RTTcpWrite(pVM->ftm.s.hSocket, "ACK\n", sizeof("ACK\n") - 1);
+ int rc = RTTcpWrite(pVM->ftm.s.hSocket, RT_STR_TUPLE("ACK\n"));
if (RT_FAILURE(rc))
{
LogRel(("FTSync: RTTcpWrite(,ACK,) -> %Rrc\n", rc));
@@ -283,7 +285,7 @@ static int ftmR3TcpReadACK(PVM pVM, const char *pszWhich, const char *pszNAckMsg
if (!strcmp(szMsg, "ACK"))
return VINF_SUCCESS;
- if (!strncmp(szMsg, "NACK=", sizeof("NACK=") - 1))
+ if (!strncmp(szMsg, RT_STR_TUPLE("NACK=")))
{
char *pszMsgText = strchr(szMsg, ';');
if (pszMsgText)
@@ -331,7 +333,7 @@ static int ftmR3TcpReadACK(PVM pVM, const char *pszWhich, const char *pszNAckMsg
*/
static int ftmR3TcpSubmitCommand(PVM pVM, const char *pszCommand, bool fWaitForAck = true)
{
- int rc = RTTcpSgWriteL(pVM->ftm.s.hSocket, 2, pszCommand, strlen(pszCommand), "\n", sizeof("\n") - 1);
+ int rc = RTTcpSgWriteL(pVM->ftm.s.hSocket, 2, pszCommand, strlen(pszCommand), RT_STR_TUPLE("\n"));
if (RT_FAILURE(rc))
return rc;
if (!fWaitForAck)
@@ -635,7 +637,7 @@ static int ftmR3PerformFullSync(PVM pVM)
{
bool fSuspended = false;
- int rc = VMR3Suspend(pVM);
+ int rc = VMR3Suspend(pVM->pUVM, VMSUSPENDREASON_FTM_SYNC);
AssertRCReturn(rc, rc);
STAM_REL_COUNTER_INC(&pVM->ftm.s.StatFullSync);
@@ -653,7 +655,7 @@ static int ftmR3PerformFullSync(PVM pVM)
AssertRC(rc);
pVM->ftm.s.fDeltaLoadSaveActive = false;
- rc = VMR3SaveFT(pVM, &g_ftmR3TcpOps, pVM, &fSuspended, false /* fSkipStateChanges */);
+ rc = VMR3SaveFT(pVM->pUVM, &g_ftmR3TcpOps, pVM, &fSuspended, false /* fSkipStateChanges */);
AssertRC(rc);
rc = ftmR3TcpReadACK(pVM, "full-sync-complete");
@@ -665,7 +667,7 @@ static int ftmR3PerformFullSync(PVM pVM)
rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)ftmR3WriteProtectMemory, 1, pVM);
AssertRCReturn(rc, rc);
- rc = VMR3Resume(pVM);
+ rc = VMR3Resume(pVM->pUVM, VMRESUMEREASON_FTM_SYNC);
AssertRC(rc);
return rc;
@@ -714,6 +716,7 @@ static DECLCALLBACK(int) ftmR3SyncDirtyPage(PVM pVM, RTGCPHYS GCPhys, uint8_t *p
break;
case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
AssertFailed();
break;
@@ -1088,7 +1091,7 @@ static DECLCALLBACK(int) ftmR3StandbyServeConnection(RTSOCKET Sock, void *pvUser
pVM->ftm.s.syncstate.fEndOfStream = false;
pVM->ftm.s.fDeltaLoadSaveActive = (fFullSync == false);
- rc = VMR3LoadFromStreamFT(pVM, &g_ftmR3TcpOps, pVM);
+ rc = VMR3LoadFromStreamFT(pVM->pUVM, &g_ftmR3TcpOps, pVM);
pVM->ftm.s.fDeltaLoadSaveActive = false;
RTSocketRelease(pVM->ftm.s.hSocket);
AssertRC(rc);
@@ -1123,7 +1126,7 @@ static DECLCALLBACK(int) ftmR3StandbyServeConnection(RTSOCKET Sock, void *pvUser
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param fMaster FT master or standby
* @param uInterval FT sync interval
* @param pszAddress Standby VM address
@@ -1134,9 +1137,12 @@ static DECLCALLBACK(int) ftmR3StandbyServeConnection(RTSOCKET Sock, void *pvUser
* @vmstate Created
* @vmstateto PoweringOn+Running (master), PoweringOn+Running_FT (standby)
*/
-VMMR3DECL(int) FTMR3PowerOn(PVM pVM, bool fMaster, unsigned uInterval, const char *pszAddress, unsigned uPort, const char *pszPassword)
+VMMR3DECL(int) FTMR3PowerOn(PUVM pUVM, bool fMaster, unsigned uInterval,
+ const char *pszAddress, unsigned uPort, const char *pszPassword)
{
- int rc = VINF_SUCCESS;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VMSTATE enmVMState = VMR3GetState(pVM);
AssertMsgReturn(enmVMState == VMSTATE_CREATED,
@@ -1154,7 +1160,7 @@ VMMR3DECL(int) FTMR3PowerOn(PVM pVM, bool fMaster, unsigned uInterval, const cha
if (pszPassword)
pVM->ftm.s.pszPassword = RTStrDup(pszPassword);
- rc = RTSemEventCreate(&pVM->ftm.s.hShutdownEvent);
+ int rc = RTSemEventCreate(&pVM->ftm.s.hShutdownEvent);
if (RT_FAILURE(rc))
return rc;
@@ -1174,36 +1180,35 @@ VMMR3DECL(int) FTMR3PowerOn(PVM pVM, bool fMaster, unsigned uInterval, const cha
}
/** @todo might need to disable page fusion as well */
- return VMR3PowerOn(pVM);
+ return VMR3PowerOn(pVM->pUVM);
}
- else
- {
- /* standby */
- rc = RTThreadCreate(NULL, ftmR3StandbyThread, pVM,
- 0, RTTHREADTYPE_DEFAULT, 0, "ftmStandby");
- if (RT_FAILURE(rc))
- return rc;
- rc = RTTcpServerCreateEx(pszAddress, uPort, &pVM->ftm.s.standby.hServer);
- if (RT_FAILURE(rc))
- return rc;
- pVM->ftm.s.fIsStandbyNode = true;
- rc = RTTcpServerListen(pVM->ftm.s.standby.hServer, ftmR3StandbyServeConnection, pVM);
- /** @todo deal with the exit code to check if we should activate this standby VM. */
- if (pVM->ftm.s.fActivateStandby)
- {
- /** @todo fallover. */
- }
+ /* standby */
+ rc = RTThreadCreate(NULL, ftmR3StandbyThread, pVM,
+ 0, RTTHREADTYPE_DEFAULT, 0, "ftmStandby");
+ if (RT_FAILURE(rc))
+ return rc;
- if (pVM->ftm.s.standby.hServer)
- {
- RTTcpServerDestroy(pVM->ftm.s.standby.hServer);
- pVM->ftm.s.standby.hServer = NULL;
- }
- if (rc == VERR_TCP_SERVER_SHUTDOWN)
- rc = VINF_SUCCESS; /* ignore this error; the standby process was cancelled. */
+ rc = RTTcpServerCreateEx(pszAddress, uPort, &pVM->ftm.s.standby.hServer);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->ftm.s.fIsStandbyNode = true;
+
+ rc = RTTcpServerListen(pVM->ftm.s.standby.hServer, ftmR3StandbyServeConnection, pVM);
+ /** @todo deal with the exit code to check if we should activate this standby VM. */
+ if (pVM->ftm.s.fActivateStandby)
+ {
+ /** @todo fallover. */
+ }
+
+ if (pVM->ftm.s.standby.hServer)
+ {
+ RTTcpServerDestroy(pVM->ftm.s.standby.hServer);
+ pVM->ftm.s.standby.hServer = NULL;
}
+ if (rc == VERR_TCP_SERVER_SHUTDOWN)
+ rc = VINF_SUCCESS; /* ignore this error; the standby process was cancelled. */
return rc;
}
@@ -1212,10 +1217,13 @@ VMMR3DECL(int) FTMR3PowerOn(PVM pVM, bool fMaster, unsigned uInterval, const cha
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(int) FTMR3CancelStandby(PVM pVM)
+VMMR3DECL(int) FTMR3CancelStandby(PUVM pUVM)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(!pVM->fFaultTolerantMaster, VERR_NOT_SUPPORTED);
Assert(pVM->ftm.s.standby.hServer);
@@ -1266,7 +1274,7 @@ static DECLCALLBACK(VBOXSTRICTRC) ftmR3SetCheckpointRendezvous(PVM pVM, PVMCPU p
AssertRC(rc);
pVM->ftm.s.fDeltaLoadSaveActive = true;
- rc = VMR3SaveFT(pVM, &g_ftmR3TcpOps, pVM, &fSuspended, true /* fSkipStateChanges */);
+ rc = VMR3SaveFT(pVM->pUVM, &g_ftmR3TcpOps, pVM, &fSuspended, true /* fSkipStateChanges */);
pVM->ftm.s.fDeltaLoadSaveActive = false;
AssertRC(rc);
@@ -1301,7 +1309,7 @@ static DECLCALLBACK(VBOXSTRICTRC) ftmR3SetCheckpointRendezvous(PVM pVM, PVMCPU p
* @param pVM Pointer to the VM.
* @param enmCheckpoint Checkpoint type
*/
-VMMR3DECL(int) FTMR3SetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmCheckpoint)
+VMMR3_INT_DECL(int) FTMR3SetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmCheckpoint)
{
int rc;
@@ -1310,17 +1318,18 @@ VMMR3DECL(int) FTMR3SetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmCheckpoint)
switch (enmCheckpoint)
{
- case FTMCHECKPOINTTYPE_NETWORK:
- STAM_REL_COUNTER_INC(&pVM->ftm.s.StatCheckpointNetwork);
- break;
+ case FTMCHECKPOINTTYPE_NETWORK:
+ STAM_REL_COUNTER_INC(&pVM->ftm.s.StatCheckpointNetwork);
+ break;
- case FTMCHECKPOINTTYPE_STORAGE:
- STAM_REL_COUNTER_INC(&pVM->ftm.s.StatCheckpointStorage);
- break;
+ case FTMCHECKPOINTTYPE_STORAGE:
+ STAM_REL_COUNTER_INC(&pVM->ftm.s.StatCheckpointStorage);
+ break;
- default:
- break;
+ default:
+ AssertMsgFailedReturn(("%d\n", enmCheckpoint), VERR_INVALID_PARAMETER);
}
+
pVM->ftm.s.fCheckpointingActive = true;
if (VM_IS_EMT(pVM))
{
@@ -1329,13 +1338,13 @@ VMMR3DECL(int) FTMR3SetCheckpoint(PVM pVM, FTMCHECKPOINTTYPE enmCheckpoint)
/* We must take special care here as the memory sync is competing with us and requires a responsive EMT. */
while ((rc = PDMCritSectTryEnter(&pVM->ftm.s.CritSect)) == VERR_SEM_BUSY)
{
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
AssertRC(rc);
}
- if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_REQUEST))
{
rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
AssertRC(rc);
diff --git a/src/VBox/VMM/VMMR3/HM.cpp b/src/VBox/VMM/VMMR3/HM.cpp
new file mode 100644
index 00000000..a950fe42
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/HM.cpp
@@ -0,0 +1,3169 @@
+/* $Id: HM.cpp $ */
+/** @file
+ * HM - Intel/AMD VM Hardware Support Manager.
+ */
+
+/*
+ * Copyright (C) 2006-2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/csam.h>
+#include <VBox/vmm/selm.h>
+#ifdef VBOX_WITH_REM
+# include <VBox/vmm/rem.h>
+#endif
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/vmm/hm_svm.h>
+#include "HMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+
+#include <iprt/assert.h>
+#include <VBox/log.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/string.h>
+#include <iprt/env.h>
+#include <iprt/thread.h>
+
+
+/*******************************************************************************
+* Global Variables *
+*******************************************************************************/
+#ifdef VBOX_WITH_STATISTICS
+# define EXIT_REASON(def, val, str) #def " - " #val " - " str
+# define EXIT_REASON_NIL() NULL
+/** Exit reason descriptions for VT-x, used to describe statistics. */
+static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
+{
+ EXIT_REASON(VMX_EXIT_XCPT_OR_NMI , 0, "Exception or non-maskable interrupt (NMI)."),
+ EXIT_REASON(VMX_EXIT_EXT_INT , 1, "External interrupt."),
+ EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
+ EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
+ EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
+ EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
+ EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
+ EXIT_REASON(VMX_EXIT_INT_WINDOW , 7, "Interrupt window."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
+ EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest attempted to execute CPUID."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_HLT , 12, "Guest attempted to execute HLT."),
+ EXIT_REASON(VMX_EXIT_INVD , 13, "Guest attempted to execute INVD."),
+ EXIT_REASON(VMX_EXIT_INVLPG , 14, "Guest attempted to execute INVLPG."),
+ EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest attempted to execute RDPMC."),
+ EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest attempted to execute RDTSC."),
+ EXIT_REASON(VMX_EXIT_RSM , 17, "Guest attempted to execute RSM in SMM."),
+ EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest attempted to execute VMCALL."),
+ EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest attempted to execute VMCLEAR."),
+ EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest attempted to execute VMLAUNCH."),
+ EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest attempted to execute VMPTRLD."),
+ EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest attempted to execute VMPTRST."),
+ EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest attempted to execute VMREAD."),
+ EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest attempted to execute VMRESUME."),
+ EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest attempted to execute VMWRITE."),
+ EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest attempted to execute VMXOFF."),
+ EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest attempted to execute VMXON."),
+ EXIT_REASON(VMX_EXIT_MOV_CRX , 28, "Control-register accesses."),
+ EXIT_REASON(VMX_EXIT_MOV_DRX , 29, "Debug-register accesses."),
+ EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
+ EXIT_REASON(VMX_EXIT_RDMSR , 31, "Guest attempted to execute RDMSR."),
+ EXIT_REASON(VMX_EXIT_WRMSR , 32, "Guest attempted to execute WRMSR."),
+ EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
+ EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest executed MWAIT."),
+ EXIT_REASON(VMX_EXIT_MTF , 37, "Monitor Trap Flag."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest attempted to execute MONITOR."),
+ EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest attempted to execute PAUSE."),
+ EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_TPR_BELOW_THRESHOLD, 43, "TPR below threshold. Guest attempted to execute MOV to CR8."),
+ EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest attempted to access memory at a physical address on the APIC-access page."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest attempted to execute LGDT, LIDT, SGDT, or SIDT."),
+ EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest attempted to execute LLDT, LTR, SLDT, or STR."),
+ EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
+ EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
+ EXIT_REASON(VMX_EXIT_INVEPT , 50, "Guest attempted to execute INVEPT."),
+ EXIT_REASON(VMX_EXIT_RDTSCP , 51, "Guest attempted to execute RDTSCP."),
+ EXIT_REASON(VMX_EXIT_PREEMPT_TIMER , 52, "VMX-preemption timer expired."),
+ EXIT_REASON(VMX_EXIT_INVVPID , 53, "Guest attempted to execute INVVPID."),
+ EXIT_REASON(VMX_EXIT_WBINVD , 54, "Guest attempted to execute WBINVD."),
+ EXIT_REASON(VMX_EXIT_XSETBV , 55, "Guest attempted to execute XSETBV."),
+ EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_RDRAND , 57, "Guest attempted to execute RDRAND."),
+ EXIT_REASON(VMX_EXIT_INVPCID , 58, "Guest attempted to execute INVPCID."),
+ EXIT_REASON(VMX_EXIT_VMFUNC , 59, "Guest attempted to execute VMFUNC.")
+};
+/** Exit reason descriptions for AMD-V, used to describe statistics. */
+static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
+{
+ EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
+ EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
+ EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
+ EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
+ EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
+ EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
+ EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
+ EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
+ EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
+ EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
+ EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
+ EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
+ EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
+ EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
+ EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
+ EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
+ EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
+ EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
+ EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
+ EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
+ EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
+ EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
+ EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
+ EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
+ EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
+ EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
+ EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
+ EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
+ EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11"),
+ EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
+ EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
+ EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
+ EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
+ EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (#DE)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (#DB)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (#NMI)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (#BP)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (#OF)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (#BR)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (#UD)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (#NM)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (#DF)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (#CO_SEG_OVERRUN)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (#TS)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (#NP)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (#SS)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (#GP)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (#PF)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0x0f)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (#MF)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (#AC)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (#MC)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (#XF)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 22 (0x15)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
+ EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
+ EXIT_REASON(SVM_EXIT_INTR , 96, "Physical maskable interrupt (host)."),
+ EXIT_REASON(SVM_EXIT_NMI , 97, "Physical non-maskable interrupt (host)."),
+ EXIT_REASON(SVM_EXIT_SMI , 98, "System management interrupt (host)."),
+ EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal (host)."),
+ EXIT_REASON(SVM_EXIT_VINTR ,100, "Virtual interrupt-window exit."),
+ EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
+ EXIT_REASON(SVM_EXIT_IDTR_READ ,102, "Read IDTR"),
+ EXIT_REASON(SVM_EXIT_GDTR_READ ,103, "Read GDTR"),
+ EXIT_REASON(SVM_EXIT_LDTR_READ ,104, "Read LDTR."),
+ EXIT_REASON(SVM_EXIT_TR_READ ,105, "Read TR."),
+ EXIT_REASON(SVM_EXIT_TR_READ ,106, "Write IDTR."),
+ EXIT_REASON(SVM_EXIT_TR_READ ,107, "Write GDTR."),
+ EXIT_REASON(SVM_EXIT_TR_READ ,108, "Write LDTR."),
+ EXIT_REASON(SVM_EXIT_TR_READ ,109, "Write TR."),
+ EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
+ EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
+ EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
+ EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
+ EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
+ EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
+ EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
+ EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
+ EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
+ EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
+ EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
+ EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
+ EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
+ EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port."),
+ EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
+ EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
+ EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "Legacy FPU handling enabled; processor is frozen in an x87/mmx instruction waiting for an interrupt"),
+ EXIT_REASON(SVM_EXIT_SHUTDOWN ,127, "Shutdown."),
+ EXIT_REASON(SVM_EXIT_VMRUN ,128, "VMRUN instruction."),
+ EXIT_REASON(SVM_EXIT_VMMCALL ,129, "VMCALL instruction."),
+ EXIT_REASON(SVM_EXIT_VMLOAD ,130, "VMLOAD instruction."),
+ EXIT_REASON(SVM_EXIT_VMSAVE ,131, "VMSAVE instruction."),
+ EXIT_REASON(SVM_EXIT_STGI ,132, "STGI instruction."),
+ EXIT_REASON(SVM_EXIT_CLGI ,133, "CLGI instruction."),
+ EXIT_REASON(SVM_EXIT_SKINIT ,134, "SKINIT instruction."),
+ EXIT_REASON(SVM_EXIT_RDTSCP ,135, "RDTSCP instruction."),
+ EXIT_REASON(SVM_EXIT_ICEBP ,136, "ICEBP instruction."),
+ EXIT_REASON(SVM_EXIT_WBINVD ,137, "WBINVD instruction."),
+ EXIT_REASON(SVM_EXIT_MONITOR ,138, "MONITOR instruction."),
+ EXIT_REASON(SVM_EXIT_MWAIT ,139, "MWAIT instruction."),
+ EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
+ EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging fault."),
+ EXIT_REASON_NIL()
+};
+# undef EXIT_REASON
+# undef EXIT_REASON_NIL
+#endif /* VBOX_WITH_STATISTICS */
+
+#define HMVMX_REPORT_FEATURE(allowed1, disallowed0, featflag) \
+ do { \
+ if ((allowed1) & (featflag)) \
+ LogRel(("HM: " #featflag "\n")); \
+ else \
+ LogRel(("HM: " #featflag " (must be cleared)\n")); \
+ if ((disallowed0) & (featflag)) \
+ LogRel(("HM: " #featflag " (must be set)\n")); \
+ } while (0)
+
+#define HMVMX_REPORT_ALLOWED_FEATURE(allowed1, featflag) \
+ do { \
+ if ((allowed1) & (featflag)) \
+ LogRel(("HM: " #featflag "\n")); \
+ else \
+ LogRel(("HM: " #featflag " not supported\n")); \
+ } while (0)
+
+#define HMVMX_REPORT_CAPABILITY(msrcaps, cap) \
+ do { \
+ if ((msrcaps) & (cap)) \
+ LogRel(("HM: " #cap "\n")); \
+ } while (0)
+
+
+/*******************************************************************************
+* Internal Functions *
+*******************************************************************************/
+static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
+static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+static int hmR3InitCPU(PVM pVM);
+static int hmR3InitFinalizeR0(PVM pVM);
+static int hmR3InitFinalizeR0Intel(PVM pVM);
+static int hmR3InitFinalizeR0Amd(PVM pVM);
+static int hmR3TermCPU(PVM pVM);
+
+
+
+/**
+ * Initializes the HM.
+ *
+ * This reads the config and check whether VT-x or AMD-V hardware is available
+ * if configured to use it. This is one of the very first components to be
+ * initialized after CFGM, so that we can fall back to raw-mode early in the
+ * initialization process.
+ *
+ * Note that a lot of the set up work is done in ring-0 and thus postponed till
+ * the ring-3 and ring-0 callback to HMR3InitCompleted.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ *
+ * @remarks Be careful with what we call here, since most of the VMM components
+ * are uninitialized.
+ */
+VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
+{
+ LogFlow(("HMR3Init\n"));
+
+ /*
+ * Assert alignment and sizes.
+ */
+ AssertCompileMemberAlignment(VM, hm.s, 32);
+ AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
+
+ /*
+ * Register the saved state data unit.
+ */
+ /* Note: the unit keeps the legacy "HWACCM" name for saved-state compatibility. */
+ int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SSM_VERSION, sizeof(HM),
+ NULL, NULL, NULL,
+ NULL, hmR3Save, NULL,
+ NULL, hmR3Load, NULL);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Misc initialisation.
+ */
+ //pVM->hm.s.vmx.fSupported = false;
+ //pVM->hm.s.svm.fSupported = false;
+ //pVM->hm.s.vmx.fEnabled = false;
+ //pVM->hm.s.svm.fEnabled = false;
+ //pVM->hm.s.fNestedPaging = false;
+
+
+ /*
+ * Read configuration.
+ */
+ PCFGMNODE pCfgHM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
+
+ /** @cfgm{/HM/HMForced, bool, false}
+ * Forces hardware virtualization, no falling back on raw-mode. HM must be
+ * enabled, i.e. /HMEnabled must be true. */
+ bool fHMForced;
+#ifdef VBOX_WITH_RAW_MODE
+ rc = CFGMR3QueryBoolDef(pCfgHM, "HMForced", &fHMForced, false);
+ AssertRCReturn(rc, rc);
+ AssertLogRelMsgReturn(!fHMForced || pVM->fHMEnabled, ("Configuration error: HM forced but not enabled!\n"),
+ VERR_INVALID_PARAMETER);
+ /* Darwin: if HM is enabled at all, force it - VT-x/AMD-V failures below then
+ become hard errors instead of falling back to raw-mode. */
+# if defined(RT_OS_DARWIN)
+ if (pVM->fHMEnabled)
+ fHMForced = true;
+# endif
+ /* SMP configurations require HM (see the assertion), so force it for more
+ than one VCPU - no raw-mode fallback is possible in that case. */
+ AssertLogRelMsgReturn(pVM->cCpus == 1 || pVM->fHMEnabled, ("Configuration error: SMP requires HM to be enabled!\n"),
+ VERR_INVALID_PARAMETER);
+ if (pVM->cCpus > 1)
+ fHMForced = true;
+#else /* !VBOX_WITH_RAW_MODE */
+ AssertRelease(pVM->fHMEnabled);
+ fHMForced = true;
+#endif /* !VBOX_WITH_RAW_MODE */
+
+ /** @cfgm{/HM/EnableNestedPaging, bool, false}
+ * Enables nested paging (aka extended page tables). */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableUX, bool, true}
+ * Enables the VT-x unrestricted execution feature. */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "EnableUX", &pVM->hm.s.vmx.fAllowUnrestricted, true);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableLargePages, bool, false}
+ * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
+ * page table walking and maybe better TLB hit rate in some cases. */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "EnableLargePages", &pVM->hm.s.fLargePages, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/EnableVPID, bool, false}
+ * Enables the VT-x VPID feature. */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
+ * Enables TPR patching for 32-bit windows guests with IO-APIC. */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "TPRPatchingEnabled", &pVM->hm.s.fTRPPatchingAllowed, false);
+ AssertRCReturn(rc, rc);
+
+ /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
+ * Enables AMD64 cpu features.
+ * On 32-bit hosts this isn't default and require host CPU support. 64-bit hosts
+ * already have the support. */
+#ifdef VBOX_ENABLE_64_BITS_GUESTS
+ rc = CFGMR3QueryBoolDef(pCfgHM, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
+ AssertLogRelRCReturn(rc, rc);
+#else
+ pVM->hm.s.fAllow64BitGuests = false;
+#endif
+
+ /** @cfgm{/HM/Exclusive, bool}
+ * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
+ * global init for each host CPU. If false, we do local init each time we wish
+ * to execute guest code.
+ *
+ * Default is false for Mac OS X and Windows due to the higher risk of conflicts
+ * with other hypervisors.
+ */
+ rc = CFGMR3QueryBoolDef(pCfgHM, "Exclusive", &pVM->hm.s.fGlobalInit,
+#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
+ false
+#else
+ true
+#endif
+ );
+ AssertLogRelRCReturn(rc, rc);
+
+ /** @cfgm{/HM/MaxResumeLoops, uint32_t}
+ * The number of times to resume guest execution before we forcibly return to
+ * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
+ * determines the default value. */
+ rc = CFGMR3QueryU32Def(pCfgHM, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Check if VT-x or AMD-v support according to the users wishes.
+ */
+ /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
+ * VERR_SVM_IN_USE. */
+ if (pVM->fHMEnabled)
+ {
+ uint32_t fCaps;
+ rc = SUPR3QueryVTCaps(&fCaps);
+ if (RT_SUCCESS(rc))
+ {
+ if (fCaps & SUPVTCAPS_AMD_V)
+ LogRel(("HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
+ else if (fCaps & SUPVTCAPS_VT_X)
+ {
+ /* VT-x additionally needs a working kernel-side implementation check. */
+ rc = SUPR3QueryVTxSupported();
+ if (RT_SUCCESS(rc))
+ LogRel(("HMR3Init: VT-x%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
+ else
+ {
+#ifdef RT_OS_LINUX
+ const char *pszMinReq = " Linux 2.6.13 or newer required!";
+#else
+ const char *pszMinReq = "";
+#endif
+ if (fHMForced)
+ return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x.%s\n", pszMinReq);
+
+ /* Fall back to raw-mode. */
+ LogRel(("HMR3Init: Falling back to raw-mode: The host kernel does not support VT-x.%s\n", pszMinReq));
+ pVM->fHMEnabled = false;
+ }
+ }
+ else
+ AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
+ VERR_INTERNAL_ERROR_5);
+
+ /*
+ * Do we require a little bit or raw-mode for 64-bit guest execution?
+ */
+ pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
+ && pVM->fHMEnabled
+ && pVM->hm.s.fAllow64BitGuests;
+ }
+ else
+ {
+ /* Map the SUPR3QueryVTCaps failure to a user readable message; unknown
+ status codes get reported via the generic VMSetError path below. */
+ const char *pszMsg;
+ switch (rc)
+ {
+ case VERR_UNSUPPORTED_CPU:
+ pszMsg = "Unknown CPU, VT-x or AMD-v features cannot be ascertained.";
+ break;
+
+ case VERR_VMX_NO_VMX:
+ pszMsg = "VT-x is not available.";
+ break;
+
+ case VERR_VMX_MSR_VMXON_DISABLED:
+ pszMsg = "VT-x is disabled in the BIOS.";
+ break;
+
+ case VERR_VMX_MSR_SMX_VMXON_DISABLED:
+ pszMsg = "VT-x is disabled in the BIOS for Safer-Mode/Trusted Extensions.";
+ break;
+
+ case VERR_VMX_MSR_LOCKING_FAILED:
+ pszMsg = "Failed to enable and lock VT-x features.";
+ break;
+
+ case VERR_SVM_NO_SVM:
+ pszMsg = "AMD-V is not available.";
+ break;
+
+ case VERR_SVM_DISABLED:
+ pszMsg = "AMD-V is disabled in the BIOS (or by the host OS).";
+ break;
+
+ default:
+ pszMsg = NULL;
+ break;
+ }
+ if (fHMForced && pszMsg)
+ return VM_SET_ERROR(pVM, rc, pszMsg);
+ if (!pszMsg)
+ return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
+
+ /* Fall back to raw-mode. */
+ LogRel(("HMR3Init: Falling back to raw-mode: %s\n", pszMsg));
+ pVM->fHMEnabled = false;
+ }
+ }
+
+ /* It's now OK to use the predicate function. */
+ pVM->fHMEnabledFixed = true;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Initializes the per-VCPU HM.
+ *
+ * Clears the active flag on each VCPU and registers all the STAM counters and
+ * profiling samples (mostly under VBOX_WITH_STATISTICS); with
+ * VBOX_WITH_CRASHDUMP_MAGIC it also plants magic markers in the VMCS caches.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ */
+static int hmR3InitCPU(PVM pVM)
+{
+ LogFlow(("HMR3InitCPU\n"));
+
+ if (!HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+ pVCpu->hm.s.fActive = false;
+ }
+
+#ifdef VBOX_WITH_STATISTICS
+ STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
+ STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
+#endif
+
+ /*
+ * Statistics.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+ int rc;
+
+#ifdef VBOX_WITH_STATISTICS
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of RTMpPokeCpu",
+ "/PROF/CPU%d/HM/Poke", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of poke wait",
+ "/PROF/CPU%d/HM/PokeWait", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of poke wait when RTMpPokeCpu fails",
+ "/PROF/CPU%d/HM/PokeWaitFailed", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of VMXR0RunGuestCode entry",
+ "/PROF/CPU%d/HM/StatEntry", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of VMXR0RunGuestCode exit part 1",
+ "/PROF/CPU%d/HM/SwitchFromGC_1", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of VMXR0RunGuestCode exit part 2",
+ "/PROF/CPU%d/HM/SwitchFromGC_2", i);
+ AssertRC(rc);
+
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitIO, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "I/O",
+ "/PROF/CPU%d/HM/SwitchFromGC_2/IO", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitMovCRx, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "MOV CRx",
+ "/PROF/CPU%d/HM/SwitchFromGC_2/MovCRx", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitXcptNmi, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Exceptions, NMIs",
+ "/PROF/CPU%d/HM/SwitchFromGC_2/XcptNmi", i);
+ AssertRC(rc);
+
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatLoadGuestState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of VMXR0LoadGuestState",
+ "/PROF/CPU%d/HM/StatLoadGuestState", i);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
+ "Profiling of VMLAUNCH/VMRESUME.",
+ "/PROF/CPU%d/HM/InGC", i);
+ AssertRC(rc);
+
+# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
+ STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher.",
+ "/PROF/CPU%d/HM/Switcher3264", i);
+ AssertRC(rc);
+# endif
+
+# ifdef HM_PROFILE_EXIT_DISPATCH
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED,
+ STAMUNIT_TICKS_PER_CALL, "Profiling the dispatching of exit handlers.",
+ "/PROF/CPU%d/HM/ExitDispatch", i);
+ AssertRC(rc);
+# endif
+
+#endif
+ /* Note: defined outside the VBOX_WITH_STATISTICS block on purpose - it is
+ also used for the always-registered counters (e.g. StatExitHostNmiInGC). */
+# define HM_REG_COUNTER(a, b, desc) \
+ rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, desc, b, i); \
+ AssertRC(rc);
+
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitAll, "/HM/CPU%d/Exit/All", "Exits (total).");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM, "/HM/CPU%d/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM, "/HM/CPU%d/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF, "/HM/CPU%d/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM, "/HM/CPU%d/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF, "/HM/CPU%d/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD, "/HM/CPU%d/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS, "/HM/CPU%d/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP, "/HM/CPU%d/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP, "/HM/CPU%d/Exit/Trap/Gst/#GP", "Guest #GP (general protection) execption.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF, "/HM/CPU%d/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE, "/HM/CPU%d/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB, "/HM/CPU%d/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP, "/HM/CPU%d/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg, "/HM/CPU%d/Exit/Instr/Invlpg", "Guest attempted to execute INVLPG.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvd, "/HM/CPU%d/Exit/Instr/Invd", "Guest attempted to execute INVD.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitWbinvd, "/HM/CPU%d/Exit/Instr/Wbinvd", "Guest attempted to execute WBINVD.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitPause, "/HM/CPU%d/Exit/Instr/Pause", "Guest attempted to execute PAUSE.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitCpuid, "/HM/CPU%d/Exit/Instr/Cpuid", "Guest attempted to execute CPUID.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtsc, "/HM/CPU%d/Exit/Instr/Rdtsc", "Guest attempted to execute RDTSC.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtscp, "/HM/CPU%d/Exit/Instr/Rdtscp", "Guest attempted to execute RDTSCP.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdpmc, "/HM/CPU%d/Exit/Instr/Rdpmc", "Guest attempted to execute RDPMC.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdrand, "/HM/CPU%d/Exit/Instr/Rdrand", "Guest attempted to execute RDRAND.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "Guest attempted to execute RDMSR.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "Guest attempted to execute WRMSR.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait", "Guest attempted to execute MWAIT.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor", "Guest attempted to execute MONITOR.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR/Write", "Guest attempted to write a debug register.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR/Read", "Guest attempted to read a debug register.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", "Guest attempted to execute CLTS.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", "Guest attempted to execute LMSW.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli", "Guest attempted to execute CLI.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti", "Guest attempted to execute STI.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf", "Guest attempted to execute PUSHF.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf", "Guest attempted to execute POPF.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret", "Guest attempted to execute IRET.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", "Guest attempted to execute INT.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "Guest attempted to access descriptor table register (GDTR, IDTR, LDTR).");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write", "I/O write.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/IO/Read", "I/O read.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/IO/WriteString", "String I/O write.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/IO/ReadString", "String I/O read.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume, "/HM/CPU%d/Exit/MaxResume", "Maximum VMRESUME inner-loop counter reached.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", "Host interrupt received.");
+#endif
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitHostNmiInGC, "/HM/CPU%d/Exit/HostNmiInGC", "Host NMI received while in guest context.");
+#ifdef VBOX_WITH_STATISTICS
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer, "/HM/CPU%d/Exit/PreemptTimer", "VMX-preemption timer expired.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", "Guest attempted a task switch.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf, "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess, "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq, "/HM/CPU%d/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatPendingHostIrq, "/HM/CPU%d/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchHmToR3FF, "/HM/CPU%d/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchExitToR3, "/HM/CPU%d/Switch/ExitToR3", "Exit to ring-3 (total).");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchLongJmpToR3, "/HM/CPU%d/Switch/LongJmpToR3", "Longjump to ring-3.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatInjectInterrupt, "/HM/CPU%d/EventInject/Interrupt", "Injected an external interrupt into the guest.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatInjectXcpt, "/HM/CPU%d/EventInject/Trap", "Injected an exception into the guest.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatInjectPendingReflect, "/HM/CPU%d/EventInject/PendingReflect", "Reflecting an exception back to the guest.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatPreemptPreempting, "/HM/CPU%d/Preempt/Preempting", "EMT has been preempted while in HM context.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatPreemptSaveHostState, "/HM/CPU%d/Preempt/SaveHostState", "Preemption caused us to resave host state.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage, "/HM/CPU%d/Flush/Page", "Invalidating a guest page on all guest CPUs.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual, "/HM/CPU%d/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual, "/HM/CPU%d/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlb, "/HM/CPU%d/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbManual, "/HM/CPU%d/Flush/TLB/Manual", "Request a full guest-TLB flush.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/Skipped", "No TLB flushing required.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushEntire, "/HM/CPU%d/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushAsid, "/HM/CPU%d/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushNestedPaging, "/HM/CPU%d/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgVirt, "/HM/CPU%d/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgPhys, "/HM/CPU%d/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown, "/HM/CPU%d/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept", "Guest is in catchup mode, intercept TSC accesses.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatTscInterceptOverFlow, "/HM/CPU%d/TSC/InterceptOverflow", "TSC offset overflow, fallback to intercept TSC accesses.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch, "/HM/CPU%d/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIoCheck, "/HM/CPU%d/Debug/IOCheck", "Checking for I/O breakpoint.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatLoadMinimal, "/HM/CPU%d/Load/Minimal", "VM-entry loading minimal guest-state.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatLoadFull, "/HM/CPU%d/Load/Full", "VM-entry loading the full guest-state.");
+
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelBase, "/HM/CPU%d/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit, "/HM/CPU%d/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckRmOk, "/HM/CPU%d/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadSel, "/HM/CPU%d/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRpl, "/HM/CPU%d/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadLdt, "/HM/CPU%d/VMXCheck/LDT", "Could not use VMX due to unsuitable LDT.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadTr, "/HM/CPU%d/VMXCheck/TR", "Could not use VMX due to unsuitable TR.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckPmOk, "/HM/CPU%d/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack, "/HM/CPU%d/Switch64/Fpu", "Saving guest FPU/XMM state.");
+ HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack, "/HM/CPU%d/Switch64/Debug", "Saving guest debug state.");
+#endif
+
+ for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hm.s.StatExitCRxWrite); j++)
+ {
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, "Profiling of CRx writes",
+ "/HM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, "Profiling of CRx reads",
+ "/HM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
+ AssertRC(rc);
+ }
+
+#undef HM_REG_COUNTER
+
+ pVCpu->hm.s.paStatExitReason = NULL;
+
+ /* The exit-reason table lives on the hyper heap so that the R3-to-R0 mapping
+ obtained below lets ring-0 code update the counters directly. */
+ rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pVCpu->hm.s.paStatExitReason), 0 /* uAlignment */, MM_TAG_HM,
+ (void **)&pVCpu->hm.s.paStatExitReason);
+ AssertRC(rc);
+ if (RT_SUCCESS(rc))
+ {
+ const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
+ for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+ {
+ if (papszDesc[j])
+ {
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+ STAMUNIT_OCCURENCES, papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j);
+ AssertRC(rc);
+ }
+ }
+ rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i);
+ AssertRC(rc);
+ }
+ pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+ Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
+# else
+ Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR);
+# endif
+
+ rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
+ AssertRCReturn(rc, rc);
+ pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+ Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
+# else
+ Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
+# endif
+ /* NOTE(review): 256 counters are allocated above but only vectors 0..254 are
+ registered here (j < 255) - confirm whether skipping the last entry is
+ intentional or an off-by-one. */
+ for (unsigned j = 0; j < 255; j++)
+ {
+ STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Injected event.",
+ (j < 0x20) ? "/HM/CPU%d/EventInject/Event/Trap/%02X" : "/HM/CPU%d/EventInject/Event/IRQ/%02X", i, j);
+ }
+
+#endif /* VBOX_WITH_STATISTICS */
+ }
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ /*
+ * Magic marker for searching in crash dumps.
+ */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+
+ PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
+ strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
+ pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
+ }
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called when a init phase has completed.
+ *
+ * Ring-3 completion triggers the per-VCPU HM init (flags and statistics),
+ * ring-0 completion triggers the VT-x/AMD-V hardware initialization; all
+ * other phases are ignored.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM.
+ * @param enmWhat The phase that completed.
+ */
+VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+ switch (enmWhat)
+ {
+ case VMINITCOMPLETED_RING3:
+ return hmR3InitCPU(pVM);
+ case VMINITCOMPLETED_RING0:
+ return hmR3InitFinalizeR0(pVM);
+ default:
+ return VINF_SUCCESS;
+ }
+}
+
+
+/**
+ * Turns off normal raw mode features.
+ *
+ * Forces every VCPU back to real-mode paging so that PGM reselects the shadow
+ * paging mode appropriate for hardware-assisted execution.
+ *
+ * @param pVM Pointer to the VM.
+ */
+static void hmR3DisableRawMode(PVM pVM)
+{
+ /* Reinit the paging mode to force the new shadow mode. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+
+ PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
+ }
+}
+
+
+/**
+ * Initialize VT-x or AMD-V.
+ *
+ * Runs after the ring-0 init phase: reports ring-0 init errors, enables the
+ * hardware on all host CPUs, and dispatches to the vendor-specific finalizer.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ */
+static int hmR3InitFinalizeR0(PVM pVM)
+{
+ int rc;
+
+ if (!HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
+ /*
+ * Hack to allow users to work around broken BIOSes that incorrectly set
+ * EFER.SVME, which makes us believe somebody else is already using AMD-V.
+ */
+ if ( !pVM->hm.s.vmx.fSupported
+ && !pVM->hm.s.svm.fSupported
+ && pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
+ && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
+ {
+ LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
+ pVM->hm.s.svm.fSupported = true;
+ pVM->hm.s.svm.fIgnoreInUseError = true;
+ pVM->hm.s.lLastError = VINF_SUCCESS;
+ }
+
+ /*
+ * Report ring-0 init errors.
+ */
+ if ( !pVM->hm.s.vmx.fSupported
+ && !pVM->hm.s.svm.fSupported)
+ {
+ LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.lLastError));
+ LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
+ switch (pVM->hm.s.lLastError)
+ {
+ case VERR_VMX_IN_VMX_ROOT_MODE:
+ return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
+ case VERR_VMX_NO_VMX:
+ return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
+ case VERR_VMX_MSR_VMXON_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is disabled in the BIOS.");
+ case VERR_VMX_MSR_SMX_VMXON_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is disabled in the BIOS for Safer-Mode/Trusted Extensions.");
+ case VERR_VMX_MSR_LOCKING_FAILED:
+ return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "Failed to enable and lock VT-x features.");
+
+ case VERR_SVM_IN_USE:
+ return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
+ case VERR_SVM_NO_SVM:
+ return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
+ case VERR_SVM_DISABLED:
+ return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
+ }
+ return VMSetError(pVM, pVM->hm.s.lLastError, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.lLastError);
+ }
+
+ /*
+ * Enable VT-x or AMD-V on all host CPUs.
+ */
+ rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("HMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HM_ENABLE failed with %Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * No TPR patching is required when the IO-APIC is not enabled for this VM.
+ * (Main should have taken care of this already)
+ */
+ pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM);
+ if (!pVM->hm.s.fHasIoApic)
+ {
+ Assert(!pVM->hm.s.fTRPPatchingAllowed); /* paranoia */
+ pVM->hm.s.fTRPPatchingAllowed = false;
+ }
+
+ /*
+ * Do the vendor specific initialization.
+ *
+ * Note! We disable release log buffering here since we're doing relatively
+ * a lot of logging and don't want to hit the disk with each LogRel
+ * statement.
+ */
+ AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
+ bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
+ if (pVM->hm.s.vmx.fSupported)
+ rc = hmR3InitFinalizeR0Intel(pVM);
+ else
+ rc = hmR3InitFinalizeR0Amd(pVM);
+ LogRel(("HM: VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
+ RTLogRelSetBuffering(fOldBuffered);
+ pVM->hm.s.fInitialized = true;
+
+ return rc;
+}
+
+
+/**
+ * Finish VT-x initialization (after ring-0 init).
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR0Intel(PVM pVM)
+{
+ int rc;
+
+ Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
+ AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatureCtrl != 0, VERR_HM_IPE_4);
+
+ uint64_t val;
+ uint64_t zap;
+ RTGCPHYS GCPhys = 0;
+
+ LogRel(("HM: Using VT-x implementation 2.0!\n"));
+ LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
+ LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
+ LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
+ LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %#RX64\n", pVM->hm.s.vmx.Msrs.u64BasicInfo));
+ LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
+ LogRel(("HM: VMCS size = %u bytes\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
+ LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.Msrs.u64BasicInfo) ? "< 4 GB" : "None"));
+ LogRel(("HM: VMCS memory type = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
+ LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BOOL(MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.Msrs.u64BasicInfo))));
+ LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BOOL(MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))));
+ LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
+
+ LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxPinCtls.u));
+ val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;
+ zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
+
+ LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls.u));
+ val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
+ zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
+ if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+ {
+ LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.u));
+ val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
+ zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_EPT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VPID);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_INVPCID);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC);
+ }
+
+ LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxEntry.u));
+ val = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;
+ zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_ENTRY_SMM);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
+
+ LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxExit.u));
+ val = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;
+ zap = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
+ HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
+
+ if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps)
+ {
+ val = pVM->hm.s.vmx.Msrs.u64EptVpidCaps;
+ LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", val));
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
+ HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
+ }
+
+ val = pVM->hm.s.vmx.Msrs.u64Misc;
+ LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", val));
+ if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val) == pVM->hm.s.vmx.cPreemptTimerShift)
+ LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val)));
+ else
+ {
+ LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %#x - erratum detected, using %#x instead\n",
+ MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val), pVM->hm.s.vmx.cPreemptTimerShift));
+ }
+
+ LogRel(("HM: MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val))));
+ LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(val)));
+ LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(val)));
+ LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(val)));
+ LogRel(("HM: MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val))));
+ LogRel(("HM: MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2 = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val))));
+ LogRel(("HM: MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val))));
+ LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(val)));
+
+ /* Paranoia */
+ AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc) >= 512);
+
+ LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed0));
+ LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed1));
+ LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed0));
+ LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed1));
+
+ val = pVM->hm.s.vmx.Msrs.u64VmcsEnum;
+ LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", val));
+ LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
+
+ val = pVM->hm.s.vmx.Msrs.u64Vmfunc;
+ if (val)
+ {
+ LogRel(("HM: MSR_A32_VMX_VMFUNC = %#RX64\n", val));
+ HMVMX_REPORT_ALLOWED_FEATURE(val, VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
+ }
+
+ LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
+ LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
+ }
+
+ if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
+ pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
+
+ if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
+ pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
+
+ /*
+ * Disallow RDTSCP in the guest if there is no secondary process-based VM execution controls as otherwise
+ * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
+ * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
+ */
+ if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+ && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
+ {
+ CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
+ LogRel(("HM: RDTSCP disabled.\n"));
+ }
+
+ /* Unrestricted guest execution also requires EPT. */
+ if ( pVM->hm.s.vmx.fAllowUnrestricted
+ && pVM->hm.s.fNestedPaging
+ && (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST))
+ {
+ pVM->hm.s.vmx.fUnrestrictedGuest = true;
+ }
+
+ if (!pVM->hm.s.vmx.fUnrestrictedGuest)
+ {
+ /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
+ rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
+ if (RT_SUCCESS(rc))
+ {
+ /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
+ Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
+ esp. Figure 20-5.*/
+ ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
+ pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
+
+ /* Bit set to 0 means software interrupts are redirected to the
+ 8086 program interrupt handler rather than switching to
+ protected-mode handler. */
+ memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
+
+ /* Allow all port IO, so that port IO instructions do not cause
+ exceptions and would instead cause a VM-exit (based on VT-x's
+ IO bitmap which we currently configure to always cause an exit). */
+ memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
+ *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
+
+ /*
+ * Construct a 1024 element page directory with 4 MB pages for
+ * the identity mapped page table used in real and protected mode
+ * without paging with EPT.
+ */
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
+ for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
+ {
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
+ | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
+ | X86_PDE4M_G;
+ }
+
+ /* We convert it here every time as pci regions could be reconfigured. */
+ rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
+ AssertRCReturn(rc, rc);
+ LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
+
+ rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
+ AssertRCReturn(rc, rc);
+ LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
+ }
+ else
+ {
+ /** @todo This cannot possibly work, there are other places which assumes
+ * this allocation cannot fail (see HMR3CanExecuteGuest()). Make this
+ * a failure case. */
+ LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
+ pVM->hm.s.vmx.pRealModeTSS = NULL;
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
+ }
+ }
+
+ LogRel((pVM->hm.s.fAllow64BitGuests
+ ? "HM: Guest support: 32-bit and 64-bit.\n"
+ : "HM: Guest support: 32-bit only.\n"));
+
+ /*
+ * Call ring-0 to set up the VM.
+ */
+ rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
+ if (rc != VINF_SUCCESS)
+ {
+ AssertMsgFailed(("%Rrc\n", rc));
+ LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+ LogRel(("HM: CPU[%u] Last instruction error %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
+ LogRel(("HM: CPU[%u] HM error %#x (%u)\n", i, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
+ }
+ return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
+ }
+
+ LogRel(("HM: VMX enabled!\n"));
+ pVM->hm.s.vmx.fEnabled = true;
+
+ hmR3DisableRawMode(pVM); /** @todo make this go away! */
+
+ /*
+ * Change the CPU features.
+ */
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+ if (pVM->hm.s.fAllow64BitGuests)
+ {
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+ }
+ /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE
+ (we reuse the host EFER in the switcher). */
+ /** @todo this needs to be fixed properly!! */
+ else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
+ {
+ if (pVM->hm.s.vmx.u64HostEfer & MSR_K6_EFER_NXE)
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+ else
+ LogRel(("HM: NX not enabled on the host, unavailable to PAE guest.\n"));
+ }
+
+ /*
+ * Log configuration details.
+ */
+ if (pVM->hm.s.fNestedPaging)
+ {
+ LogRel(("HM: Nested paging enabled!\n"));
+ if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_SINGLE_CONTEXT)
+ LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
+ else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_ALL_CONTEXTS)
+ LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
+ else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_NOT_SUPPORTED)
+ LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
+ else
+ LogRel(("HM: EPT flush type = %d\n", pVM->hm.s.vmx.enmFlushEpt));
+
+ if (pVM->hm.s.vmx.fUnrestrictedGuest)
+ LogRel(("HM: Unrestricted guest execution enabled!\n"));
+
+#if HC_ARCH_BITS == 64
+ if (pVM->hm.s.fLargePages)
+ {
+ /* Use large (2 MB) pages for our EPT PDEs where possible. */
+ PGMSetLargePageUsage(pVM, true);
+ LogRel(("HM: Large page support enabled!\n"));
+ }
+#endif
+ }
+ else
+ Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
+
+ if (pVM->hm.s.vmx.fVpid)
+ {
+ LogRel(("HM: VPID enabled!\n"));
+ if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_INDIV_ADDR)
+ LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_INDIV_ADDR\n"));
+ else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
+ LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
+ else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
+ LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
+ else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
+ LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
+ else
+ LogRel(("HM: VPID flush type = %d\n", pVM->hm.s.vmx.enmFlushVpid));
+ }
+ else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_NOT_SUPPORTED)
+ LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));
+
+ /*
+ * Check for preemption timer config override and log the state of it.
+ */
+ if (pVM->hm.s.vmx.fUsePreemptTimer)
+ {
+ PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM");
+ rc = CFGMR3QueryBoolDef(pCfgHm, "UsePreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ if (pVM->hm.s.vmx.fUsePreemptTimer)
+ LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u).\n", pVM->hm.s.vmx.cPreemptTimerShift));
+ else
+ LogRel(("HM: VMX-preemption timer disabled.\n"));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Finish AMD-V initialization (after ring-0 init).
+ *
+ * Logs the detected AMD-V capabilities, enumerates the SVM feature bits,
+ * adjusts nested-paging support, calls ring-0 to set up the VM and finally
+ * exposes the appropriate CPUID features to the guest.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
+static int hmR3InitFinalizeR0Amd(PVM pVM)
+{
+ Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
+
+ LogRel(("HM: Using AMD-V implementation 2.0!\n"));
+
+ /* Note in the release log if the host CPU is subject to AMD erratum 170
+ (identified by family/model/stepping). */
+ uint32_t u32Family;
+ uint32_t u32Model;
+ uint32_t u32Stepping;
+ if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+ LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
+ LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
+ LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
+ LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
+ LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev));
+ LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsid));
+ LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.svm.u32Features));
+
+ /*
+ * Enumerate AMD-V features.
+ */
+ static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
+ {
+#define HMSVM_REPORT_FEATURE(a_Define) { a_Define, #a_Define }
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
+ HMSVM_REPORT_FEATURE(AMD_CPUID_SVM_FEATURE_EDX_AVIC),
+#undef HMSVM_REPORT_FEATURE
+ };
+
+ /* Log each known feature bit and strip it; any bits left over afterwards
+ are reserved/unknown and logged individually below. */
+ uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
+ if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
+ {
+ LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
+ fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
+ }
+ if (fSvmFeatures)
+ for (unsigned iBit = 0; iBit < 32; iBit++)
+ if (RT_BIT_32(iBit) & fSvmFeatures)
+ LogRel(("HM: Reserved bit %u\n", iBit));
+
+ /*
+ * Adjust feature(s).
+ */
+ /* Only enable nested paging when the hardware reports it AND the config allows it. */
+ if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
+ pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
+
+ /*
+ * Call ring-0 to set up the VM.
+ */
+ int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
+ if (rc != VINF_SUCCESS)
+ {
+ AssertMsgFailed(("%Rrc\n", rc));
+ LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
+ return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
+ }
+
+ LogRel(("HM: AMD-V enabled!\n"));
+ pVM->hm.s.svm.fEnabled = true;
+
+ if (pVM->hm.s.fNestedPaging)
+ {
+ LogRel(("HM: Nested paging enabled!\n"));
+
+ /*
+ * Enable large pages (2 MB) if applicable.
+ */
+#if HC_ARCH_BITS == 64
+ if (pVM->hm.s.fLargePages)
+ {
+ PGMSetLargePageUsage(pVM, true);
+ LogRel(("HM: Large page support enabled!\n"));
+ }
+#endif
+ }
+
+ hmR3DisableRawMode(pVM);
+
+ /*
+ * Change the CPU features.
+ */
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
+ if (pVM->hm.s.fAllow64BitGuests)
+ {
+ /* 64-bit guests additionally get PAE, long mode, NX and LAHF/SAHF. */
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+ }
+ /* Turn on NXE if PAE has been enabled. */
+ else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
+ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
+
+ LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
+
+ LogRel((pVM->hm.s.fAllow64BitGuests
+ ? "HM: Guest support: 32-bit and 64-bit.\n"
+ : "HM: Guest support: 32-bit only.\n"));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Applies relocations to data and code managed by this
+ * component. This function will be called at init and
+ * whenever the VMM need to relocate it self inside the GC.
+ *
+ * @param pVM The VM.
+ */
+VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
+{
+ Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
+
+ /* Fetch the current paging mode during the relocate callback during state loading. */
+ if (VMR3GetState(pVM) == VMSTATE_LOADING)
+ {
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+ pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
+ }
+ }
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ /* 32-bit host running 64-bit guests: (re-)resolve the host-to-guest world
+ switcher matching the host paging mode, since switcher code may have moved. */
+ if (HMIsEnabled(pVM))
+ {
+ switch (PGMGetHostMode(pVM))
+ {
+ case PGMMODE_32_BIT:
+ pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
+ break;
+
+ case PGMMODE_PAE:
+ case PGMMODE_PAE_NX:
+ pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
+ break;
+
+ default:
+ /* No other host paging modes are expected here. */
+ AssertFailed();
+ break;
+ }
+ }
+#endif
+ return;
+}
+
+
+/**
+ * Notification callback which is called whenever there is a chance that a CR3
+ * value might have changed.
+ *
+ * This is called by PGM.
+ *
+ * @param pVM Pointer to the VM. (Unused here; the VM is reached via pVCpu.)
+ * @param pVCpu Pointer to the VMCPU.
+ * @param enmShadowMode New shadow paging mode.
+ * @param enmGuestMode New guest paging mode.
+ */
+VMMR3_INT_DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
+{
+ /* Ignore page mode changes during state loading. */
+ if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
+ return;
+
+ /* Cache the new shadow paging mode on the VCPU. */
+ pVCpu->hm.s.enmShadowMode = enmShadowMode;
+
+ /*
+ * If the guest left protected mode VMX execution, we'll have to be
+ * extra careful if/when the guest switches back to protected mode.
+ */
+ if (enmGuestMode == PGMMODE_REAL)
+ {
+ Log(("HMR3PagingModeChanged indicates real mode execution\n"));
+ pVCpu->hm.s.vmx.fWasInRealMode = true;
+ }
+
+ /** @todo r=ramshankar: Disabling for now. If nothing breaks remove it
+ * eventually. (Test platforms that use the cache ofc). */
+#if 0
+#ifdef VMX_USE_CACHED_VMCS_ACCESSES
+ /* Reset the contents of the read cache. */
+ PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
+ for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
+ pCache->Read.aFieldVal[j] = 0;
+#endif
+#endif
+}
+
+
+/**
+ * Terminates the HM.
+ *
+ * Termination means cleaning up and freeing all resources,
+ * the VM itself is, at this point, powered off or suspended.
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pVM Pointer to the VM.
+ */
+VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
+{
+ if (pVM->hm.s.vmx.pRealModeTSS)
+ {
+ PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
+ pVM->hm.s.vmx.pRealModeTSS = NULL; /* NULL rather than 0 for pointers. */
+ /* pNonPagingModeEPTPageTable points into the same VMMDev heap block
+ (pRealModeTSS + 3 pages) and was freed along with it; clear it too
+ so no dangling pointer is left behind. */
+ pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
+ }
+ hmR3TermCPU(pVM);
+ return VINF_SUCCESS; /* Proper status code instead of a bare 0. */
+}
+
+
+/**
+ * Terminates the per-VCPU HM.
+ *
+ * Frees the per-VCPU statistics arrays (when statistics are enabled) and
+ * scrubs the crash-dump magic markers (when those are enabled).
+ *
+ * @returns VBox status code (VINF_SUCCESS).
+ * @param pVM Pointer to the VM.
+ */
+static int hmR3TermCPU(PVM pVM)
+{
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
+
+#ifdef VBOX_WITH_STATISTICS
+ /* Free the hyper-heap exit reason and injected IRQ statistics arrays
+ and invalidate both the R3 and R0 pointers to them. */
+ if (pVCpu->hm.s.paStatExitReason)
+ {
+ MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
+ pVCpu->hm.s.paStatExitReason = NULL;
+ pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
+ }
+ if (pVCpu->hm.s.paStatInjectedIrqs)
+ {
+ MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
+ pVCpu->hm.s.paStatInjectedIrqs = NULL;
+ pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
+ }
+#endif
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ /* Wipe the VMCS cache magic so stale markers don't show up in dumps. */
+ memset(pVCpu->hm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VMCSCache.aMagic));
+ pVCpu->hm.s.vmx.VMCSCache.uMagic = 0;
+ pVCpu->hm.s.vmx.VMCSCache.uPos = 0xffffffff;
+#endif
+ }
+ return VINF_SUCCESS; /* Proper status code instead of a bare 0. */
+}
+
+
+/**
+ * Resets a virtual CPU.
+ *
+ * Used by HMR3Reset and CPU hot plugging.
+ *
+ * @param pVCpu The CPU to reset.
+ */
+VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
+{
+ /* Sync. entire state on VM reset R0-reentry. It's safe to reset
+ the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
+ HMCPU_CF_RESET_TO(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
+
+ /* Clear the CR0/CR4 mask values and any pending event state. */
+ pVCpu->hm.s.vmx.u32CR0Mask = 0;
+ pVCpu->hm.s.vmx.u32CR4Mask = 0;
+ pVCpu->hm.s.fActive = false;
+ pVCpu->hm.s.Event.fPending = false;
+ /* A freshly reset CPU starts out in real mode. */
+ pVCpu->hm.s.vmx.fWasInRealMode = true;
+ pVCpu->hm.s.vmx.u64MsrApicBase = 0;
+
+ /* Reset the contents of the read cache. */
+ PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
+ for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
+ pCache->Read.aFieldVal[j] = 0;
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ /* Magic marker for searching in crash dumps. */
+ strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
+ pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
+#endif
+}
+
+
+/**
+ * The VM is being reset.
+ *
+ * For the HM component this means that any GDT/LDT/TSS monitors
+ * needs to be removed.
+ *
+ * @param pVM Pointer to the VM.
+ */
+VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
+{
+ LogFlow(("HMR3Reset:\n"));
+
+ if (HMIsEnabled(pVM))
+ hmR3DisableRawMode(pVM);
+
+ /* Reset the per-VCPU HM state. */
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
+
+ HMR3ResetCpu(pVCpu);
+ }
+
+ /* Clear all patch information. */
+ pVM->hm.s.pGuestPatchMem = 0;
+ pVM->hm.s.pFreeGuestPatchMem = 0;
+ pVM->hm.s.cbGuestPatchMem = 0;
+ pVM->hm.s.cPatches = 0;
+ pVM->hm.s.PatchTree = 0;
+ pVM->hm.s.fTPRPatchingActive = false;
+ ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
+}
+
+
+/**
+ * Callback to undo TPR patching: restores the original instruction bytes of
+ * every recorded patch and resets all patch bookkeeping.
+ *
+ * (The previous header wrongly described this as patching an instruction;
+ * it removes patches.)
+ *
+ * @returns VBox strict status code.
+ * @param pVM Pointer to the VM.
+ * @param pVCpu The VMCPU for the EMT we're being called on.
+ * @param pvUser The VCPU id (VMCPUID) of the original patch requestor.
+ */
+DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+
+ /* Only execute the handler on the VCPU the original patch request was issued. */
+ if (pVCpu->idCpu != idCpu)
+ return VINF_SUCCESS;
+
+ Log(("hmR3RemovePatches\n"));
+ for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ uint8_t abInstr[15];
+ PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+ RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
+ int rc;
+
+#ifdef LOG_ENABLED
+ char szOutput[256];
+
+ rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szOutput, sizeof(szOutput), NULL);
+ if (RT_SUCCESS(rc))
+ Log(("Patched instr: %s\n", szOutput));
+#endif
+
+ /* Check if the instruction is still the same. */
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
+ if (rc != VINF_SUCCESS)
+ {
+ Log(("Patched code removed? (rc=%Rrc)\n", rc)); /* Fixed format-string typo: was "(rc=%Rrc0". */
+ continue; /* swapped out or otherwise removed; skip it. */
+ }
+
+ if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
+ {
+ Log(("Patched instruction was changed! (rc=%Rrc)\n", rc)); /* Fixed format-string typo: was "(rc=%Rrc0". */
+ continue; /* skip it. */
+ }
+
+ /* Restore the original opcode bytes. */
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
+ AssertRC(rc);
+
+#ifdef LOG_ENABLED
+ rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ szOutput, sizeof(szOutput), NULL);
+ if (RT_SUCCESS(rc))
+ Log(("Original instr: %s\n", szOutput));
+#endif
+ }
+ /* Reset all patch bookkeeping; the patch memory range itself stays recorded. */
+ pVM->hm.s.cPatches = 0;
+ pVM->hm.s.PatchTree = 0;
+ pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
+ pVM->hm.s.fTPRPatchingActive = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for enabling patching in a VT-x/AMD-V guest.
+ *
+ * Flushes any previously installed patches (via hmR3RemovePatches) before
+ * recording the new guest patch memory range.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param idCpu VCPU to execute hmR3RemovePatches on.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
+{
+ /* Undo any existing patches on the requesting VCPU first. */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
+ AssertRC(rc);
+
+ /* Record the guest patch memory range; the free pointer starts at its beginning. */
+ pVM->hm.s.pGuestPatchMem = pPatchMem;
+ pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
+ pVM->hm.s.cbGuestPatchMem = cbPatchMem;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Enable patching in a VT-x/AMD-V guest
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ VM_ASSERT_EMT(pVM);
+ Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+ if (pVM->cCpus > 1)
+ {
+ /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
+ int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
+ (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+ AssertRC(rc);
+ return rc;
+ }
+ /* Single VCPU: safe to do the work directly on this EMT. */
+ return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+}
+
+
+/**
+ * Disable patching in a VT-x/AMD-V guest.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pPatchMem Patch memory range.
+ * @param cbPatchMem Size of the memory range.
+ */
+VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
+{
+ Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
+
+ /* The caller must hand back the exact range previously enabled. */
+ Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
+ Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
+
+ /* Restore all original instruction bytes on the requesting VCPU. */
+ /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
+ (void *)(uintptr_t)VMMGetCpuId(pVM));
+ AssertRC(rc);
+
+ /* Forget the patch memory range and deactivate TPR patching. */
+ pVM->hm.s.pGuestPatchMem = 0;
+ pVM->hm.s.pFreeGuestPatchMem = 0;
+ pVM->hm.s.cbGuestPatchMem = 0;
+ pVM->hm.s.fTPRPatchingActive = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Callback to patch a TPR instruction (vmmcall or mov cr8).
+ *
+ * @returns VBox strict status code.
+ * @param pVM Pointer to the VM.
+ * @param pVCpu The VMCPU for the EMT we're being called on.
+ * @param pvUser User specified CPU context; the VCPU id (VMCPUID) of the
+ * original patch requestor.
+ *
+ */
+DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+ /*
+ * Only execute the handler on the VCPU the original patch request was
+ * issued. (The other CPU(s) might not yet have switched to protected
+ * mode, nor have the correct memory context.)
+ */
+ VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
+ if (pVCpu->idCpu != idCpu)
+ return VINF_SUCCESS;
+
+ /*
+ * We're racing other VCPUs here, so don't try patch the instruction twice
+ * and make sure there is still room for our patch record.
+ */
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+ if (pPatch)
+ {
+ Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ uint32_t const idx = pVM->hm.s.cPatches;
+ if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
+ {
+ Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
+ return VINF_SUCCESS;
+ }
+ pPatch = &pVM->hm.s.aPatches[idx];
+
+ Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
+
+ /*
+ * Disassemble the instruction and get cracking.
+ */
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
+ PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
+ uint32_t cbOp;
+ int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
+ AssertRC(rc);
+ if ( rc == VINF_SUCCESS
+ && pDis->pCurInstr->uOpcode == OP_MOV
+ && cbOp >= 3)
+ {
+ /* Encoding of the vmmcall instruction: 0f 01 d9. */
+ static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
+
+ /* Save the original opcode bytes so the patch can be reverted later. */
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
+ AssertRC(rc);
+
+ pPatch->cbOp = cbOp;
+
+ if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
+ {
+ /* write. Destination is a 32-bit memory displacement, so this is a TPR write. */
+ if (pDis->Param2.fUse == DISUSE_REG_GEN32)
+ {
+ pPatch->enmType = HMTPRINSTR_WRITE_REG;
+ pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
+ }
+ else
+ {
+ Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
+ pPatch->enmType = HMTPRINSTR_WRITE_IMM;
+ pPatch->uSrcOperand = pDis->Param2.uValue;
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
+ }
+ /* Overwrite the instruction with vmmcall. */
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
+ pPatch->cbNewOp = sizeof(s_abVMMCall);
+ }
+ else
+ {
+ /*
+ * TPR Read.
+ *
+ * Found:
+ * mov eax, dword [fffe0080] (5 bytes)
+ * Check if next instruction is:
+ * shr eax, 4
+ */
+ Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
+
+ uint8_t const idxMmioReg = pDis->Param1.Base.idxGenReg;
+ uint8_t const cbOpMmio = cbOp;
+ uint64_t const uSavedRip = pCtx->rip;
+
+ /* Peek at the instruction following the read; rip is restored afterwards. */
+ pCtx->rip += cbOp;
+ rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
+ pCtx->rip = uSavedRip;
+
+ if ( rc == VINF_SUCCESS
+ && pDis->pCurInstr->uOpcode == OP_SHR
+ && pDis->Param1.fUse == DISUSE_REG_GEN32
+ && pDis->Param1.Base.idxGenReg == idxMmioReg
+ && pDis->Param2.fUse == DISUSE_IMMEDIATE8
+ && pDis->Param2.uValue == 4
+ && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
+ {
+ uint8_t abInstr[15];
+
+ /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
+ access CR8 in 32-bit mode and not cause a #VMEXIT. */
+ rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
+ AssertRC(rc);
+
+ pPatch->cbOp = cbOpMmio + cbOp;
+
+ /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
+ abInstr[0] = 0xF0;
+ abInstr[1] = 0x0F;
+ abInstr[2] = 0x20;
+ abInstr[3] = 0xC0 | pDis->Param1.Base.idxGenReg;
+ /* Pad the remainder of the replaced region with nops. */
+ for (unsigned i = 4; i < pPatch->cbOp; i++)
+ abInstr[i] = 0x90; /* nop */
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
+ pPatch->cbNewOp = pPatch->cbOp;
+
+ Log(("Acceptable read/shr candidate!\n"));
+ pPatch->enmType = HMTPRINSTR_READ_SHR4;
+ }
+ else
+ {
+ /* Plain TPR read: replace just the read with vmmcall. */
+ pPatch->enmType = HMTPRINSTR_READ;
+ pPatch->uDstOperand = idxMmioReg;
+
+ rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
+ AssertRC(rc);
+
+ memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
+ pPatch->cbNewOp = sizeof(s_abVMMCall);
+ Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
+ }
+ }
+
+ /* Register the patch record, keyed by the guest EIP. */
+ pPatch->Core.Key = pCtx->eip;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+
+ pVM->hm.s.cPatches++;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccess);
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Save invalid patch, so we will not try again.
+ */
+ Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
+ pPatch->Core.Key = pCtx->eip;
+ pPatch->enmType = HMTPRINSTR_INVALID;
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+ pVM->hm.s.cPatches++;
+ STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
+ return VINF_SUCCESS;
+}
+
+
/**
 * Callback to patch a TPR instruction (jump to generated code).
 *
 * EMT rendezvous worker: replaces a 32-bit guest MOV that accesses the TPR
 * MMIO location with a 5-byte jump into code emitted into the guest patch
 * memory buffer (pVM->hm.s.pGuestPatchMem).
 *
 * @returns VBox strict status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   The VMCPU for the EMT we're being called on.
 * @param   pvUser  The VMCPUID of the VCPU that requested the patch, cast to
 *                  a void pointer (see the cast back below) -- not a context
 *                  pointer.
 */
DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only execute the handler on the VCPU the original patch request was
     * issued. (The other CPU(s) might not yet have switched to protected
     * mode, nor have the correct memory context.)
     */
    VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
    if (pVCpu->idCpu != idCpu)
        return VINF_SUCCESS;

    /*
     * We're racing other VCPUs here, so don't try patch the instruction twice
     * and make sure there is still room for our patch record.
     */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    if (pPatch)
    {
        Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
        return VINF_SUCCESS;
    }
    uint32_t const idx = pVM->hm.s.cPatches;
    if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
    {
        Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
        return VINF_SUCCESS;
    }
    pPatch = &pVM->hm.s.aPatches[idx];

    Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
    DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");

    /*
     * Disassemble the instruction and get cracking.
     */
    PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    uint32_t cbOp;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    AssertRC(rc);
    if (   rc == VINF_SUCCESS
        && pDis->pCurInstr->uOpcode == OP_MOV
        && cbOp >= 5) /* need at least 5 bytes for the rel32 jump overwrite */
    {
        uint8_t  aPatch[64];            /* assembled replacement code */
        uint32_t off = 0;               /* current write offset into aPatch */

        rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
        AssertRC(rc);

        pPatch->cbOp    = cbOp;
        pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;

        /* Param1 being a disp32 memory operand means the TPR is the destination,
           i.e. this is a TPR write; otherwise it is a TPR read into a register. */
        if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
        {
            /*
             * TPR write:
             *
             * push ECX                      [51]
             * push EDX                      [52]
             * push EAX                      [50]
             * xor EDX,EDX                   [31 D2]
             * mov EAX,EAX                   [89 C0]
             *  or
             * mov EAX,0000000CCh            [B8 CC 00 00 00]
             * mov ECX,0C0000082h            [B9 82 00 00 C0]
             * wrmsr                         [0F 30]
             * pop EAX                       [58]
             * pop EDX                       [5A]
             * pop ECX                       [59]
             * jmp return_address            [E9 return_address]
             *
             * NOTE(review): the wrmsr targets MSR_K8_LSTAR (0xC0000082), which
             * presumably acts as an intercepted TPR stand-in here -- confirm
             * against the ring-0 MSR intercept handling.
             */
            bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);

            aPatch[off++] = 0x51;    /* push ecx */
            aPatch[off++] = 0x52;    /* push edx */
            if (!fUsesEax)
                aPatch[off++] = 0x50;    /* push eax */
            aPatch[off++] = 0x31;    /* xor edx, edx */
            aPatch[off++] = 0xD2;
            if (pDis->Param2.fUse == DISUSE_REG_GEN32)
            {
                if (!fUsesEax)
                {
                    aPatch[off++] = 0x89;    /* mov eax, src_reg */
                    aPatch[off++] = MAKE_MODRM(3, pDis->Param2.Base.idxGenReg, DISGREG_EAX);
                }
            }
            else
            {
                Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
                aPatch[off++] = 0xB8;    /* mov eax, immediate */
                *(uint32_t *)&aPatch[off] = pDis->Param2.uValue;
                off += sizeof(uint32_t);
            }
            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
            off += sizeof(uint32_t);

            aPatch[off++] = 0x0F;    /* wrmsr */
            aPatch[off++] = 0x30;
            if (!fUsesEax)
                aPatch[off++] = 0x58;    /* pop eax */
            aPatch[off++] = 0x5A;    /* pop edx */
            aPatch[off++] = 0x59;    /* pop ecx */
        }
        else
        {
            /*
             * TPR read:
             *
             * push ECX                      [51]
             * push EDX                      [52]
             * push EAX                      [50]
             * mov ECX,0C0000082h            [B9 82 00 00 C0]
             * rdmsr                         [0F 32]
             * mov EAX,EAX                   [89 C0]
             * pop EAX                       [58]
             * pop EDX                       [5A]
             * pop ECX                       [59]
             * jmp return_address            [E9 return_address]
             *
             * Registers that hold the destination are neither saved nor
             * restored, since the rdmsr result must land in them.
             */
            Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);

            if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
                aPatch[off++] = 0x51;    /* push ecx */
            if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
                aPatch[off++] = 0x52;    /* push edx */
            if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
                aPatch[off++] = 0x50;    /* push eax */

            aPatch[off++] = 0x31;    /* xor edx, edx */
            aPatch[off++] = 0xD2;

            aPatch[off++] = 0xB9;    /* mov ecx, 0xc0000082 */
            *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
            off += sizeof(uint32_t);

            aPatch[off++] = 0x0F;    /* rdmsr */
            aPatch[off++] = 0x32;

            if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
            {
                aPatch[off++] = 0x89;    /* mov dst_reg, eax */
                aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, pDis->Param1.Base.idxGenReg);
            }

            if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
                aPatch[off++] = 0x58;    /* pop eax */
            if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
                aPatch[off++] = 0x5A;    /* pop edx */
            if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
                aPatch[off++] = 0x59;    /* pop ecx */
        }
        /* Relative jump back to the instruction following the patched MOV. */
        aPatch[off++] = 0xE9;    /* jmp return_address */
        *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
        off += sizeof(RTRCUINTPTR);

        if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
        {
            /* Write new code to the patch buffer. */
            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
            AssertRC(rc);

#ifdef LOG_ENABLED
            /* Disassemble and log the freshly written patch code. */
            uint32_t cbCurInstr;
            for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
                 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
                 GCPtrInstr += RT_MAX(cbCurInstr, 1))
            {
                char szOutput[256];
                rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                        szOutput, sizeof(szOutput), &cbCurInstr);
                if (RT_SUCCESS(rc))
                    Log(("Patch instr %s\n", szOutput));
                else
                    Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
            }
#endif

            /* Build the 5-byte jump (E9 rel32) from the original TPR instruction
               into the patch buffer. */
            pPatch->aNewOpcode[0] = 0xE9;
            *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);

            /* Overwrite the TPR instruction with a jump. */
            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
            AssertRC(rc);

            DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");

            pVM->hm.s.pFreeGuestPatchMem += off;
            pPatch->cbNewOp = 5;

            pPatch->Core.Key = pCtx->eip;
            rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
            AssertRC(rc);

            pVM->hm.s.cPatches++;
            pVM->hm.s.fTPRPatchingActive = true;
            STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
            return VINF_SUCCESS;
        }

        Log(("Ran out of space in our patch buffer!\n"));
    }
    else
        Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));


    /*
     * Save invalid patch, so we will not try again.
     */
    pPatch = &pVM->hm.s.aPatches[idx];
    pPatch->Core.Key = pCtx->eip;
    pPatch->enmType  = HMTPRINSTR_INVALID;
    rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    AssertRC(rc);
    pVM->hm.s.cPatches++;
    STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
    return VINF_SUCCESS;
}
+
+
+/**
+ * Attempt to patch TPR mmio instructions.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the VMCPU.
+ * @param pCtx Pointer to the guest CPU context.
+ */
+VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+ NOREF(pCtx);
+ int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
+ pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
+ (void *)(uintptr_t)pVCpu->idCpu);
+ AssertRC(rc);
+ return rc;
+}
+
+
+/**
+ * Checks if a code selector (CS) is suitable for execution
+ * within VMX when unrestricted execution isn't available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ * false.
+ * @param pSel Pointer to the selector to check (CS).
+ * uStackDpl The CPL, aka the DPL of the stack segment.
+ */
+static bool hmR3IsCodeSelectorOkForVmx(PCPUMSELREG pSel, unsigned uStackDpl)
+{
+ /*
+ * Segment must be an accessed code segment, it must be present and it must
+ * be usable.
+ * Note! These are all standard requirements and if CS holds anything else
+ * we've got buggy code somewhere!
+ */
+ AssertCompile(X86DESCATTR_TYPE == 0xf);
+ AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
+ == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
+ ("%#x\n", pSel->Attr.u),
+ false);
+
+ /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
+ must equal SS.DPL for non-confroming segments.
+ Note! This is also a hard requirement like above. */
+ AssertMsgReturn( pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
+ ? pSel->Attr.n.u2Dpl <= uStackDpl
+ : pSel->Attr.n.u2Dpl == uStackDpl,
+ ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
+ false);
+
+ /*
+ * The following two requirements are VT-x specific:
+ * - G bit must be set if any high limit bits are set.
+ * - G bit must be clear if any low limit bits are clear.
+ */
+ if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
+ && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity) )
+ return true;
+ return false;
+}
+
+
+/**
+ * Checks if a data selector (DS/ES/FS/GS) is suitable for
+ * execution within VMX when unrestricted execution isn't
+ * available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ * false.
+ * @param pSel Pointer to the selector to check
+ * (DS/ES/FS/GS).
+ */
+static bool hmR3IsDataSelectorOkForVmx(PCPUMSELREG pSel)
+{
+ /*
+ * Unusable segments are OK. These days they should be marked as such, as
+ * but as an alternative we for old saved states and AMD<->VT-x migration
+ * we also treat segments with all the attributes cleared as unusable.
+ */
+ if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
+ return true;
+
+ /** @todo tighten these checks. Will require CPUM load adjusting. */
+
+ /* Segment must be accessed. */
+ if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
+ {
+ /* Code segments must also be readable. */
+ if ( !(pSel->Attr.u & X86_SEL_TYPE_CODE)
+ || (pSel->Attr.u & X86_SEL_TYPE_READ))
+ {
+ /* The S bit must be set. */
+ if (pSel->Attr.n.u1DescType)
+ {
+ /* Except for conforming segments, DPL >= RPL. */
+ if ( pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
+ || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
+ {
+ /* Segment must be present. */
+ if (pSel->Attr.n.u1Present)
+ {
+ /*
+ * The following two requirements are VT-x specific:
+ * - G bit must be set if any high limit bits are set.
+ * - G bit must be clear if any low limit bits are clear.
+ */
+ if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
+ && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity) )
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+
+/**
+ * Checks if the stack selector (SS) is suitable for execution
+ * within VMX when unrestricted execution isn't available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ * false.
+ * @param pSel Pointer to the selector to check (SS).
+ */
+static bool hmR3IsStackSelectorOkForVmx(PCPUMSELREG pSel)
+{
+ /*
+ * Unusable segments are OK. These days they should be marked as such, as
+ * but as an alternative we for old saved states and AMD<->VT-x migration
+ * we also treat segments with all the attributes cleared as unusable.
+ */
+ /** @todo r=bird: actually all zeros isn't gonna cut it... SS.DPL == CPL. */
+ if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
+ return true;
+
+ /*
+ * Segment must be an accessed writable segment, it must be present.
+ * Note! These are all standard requirements and if SS holds anything else
+ * we've got buggy code somewhere!
+ */
+ AssertCompile(X86DESCATTR_TYPE == 0xf);
+ AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
+ == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
+ ("%#x\n", pSel->Attr.u),
+ false);
+
+ /* DPL must equal RPL.
+ Note! This is also a hard requirement like above. */
+ AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
+ ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel),
+ false);
+
+ /*
+ * The following two requirements are VT-x specific:
+ * - G bit must be set if any high limit bits are set.
+ * - G bit must be clear if any low limit bits are clear.
+ */
+ if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
+ && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity) )
+ return true;
+ return false;
+}
+
+
+/**
+ * Force execution of the current IO code in the recompiler.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pCtx Partial VM execution context.
+ */
+VMMR3_INT_DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
+{
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+
+ Assert(HMIsEnabled(pVM));
+ Log(("HMR3EmulateIoBlock\n"));
+
+ /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
+ if (HMCanEmulateIoBlockEx(pCtx))
+ {
+ Log(("HMR3EmulateIoBlock -> enabled\n"));
+ pVCpu->hm.s.EmulateIoBlock.fEnabled = true;
+ pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
+ pVCpu->hm.s.EmulateIoBlock.cr0 = pCtx->cr0;
+ return VINF_EM_RESCHEDULE_REM;
+ }
+ return VINF_SUCCESS;
+}
+
+
/**
 * Checks if we can currently use hardware accelerated raw mode.
 *
 * Interrogates the (partial) guest CPU state to decide whether it can be
 * executed under VT-x/AMD-V right now, updating pVCpu->hm.s.fActive
 * accordingly.
 *
 * @returns true if we can currently use hardware acceleration, otherwise false.
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    Partial VM execution context.
 */
VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    Assert(HMIsEnabled(pVM));

    /* If we're still executing the IO code, then return false.
       (See HMR3EmulateIoBlock; the +/- 0x200 window keeps us in the
       recompiler near the recorded I/O function.) */
    if (    RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
        &&  pCtx->rip <  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
        &&  pCtx->rip >  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
        &&  pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
        return false;

    pVCpu->hm.s.EmulateIoBlock.fEnabled = false;

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hm.s.svm.fEnabled)
    {
        pVCpu->hm.s.fActive = true;
        return true;
    }

    pVCpu->hm.s.fActive = false;

    /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
    Assert(   (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));

    bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted
         * guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /* Verify the requirements for executing code in protected
                   mode. VT-x can't handle the CPU state right after a switch
                   from real to protected mode. (all sorts of RPL & DPL assumptions). */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmR3IsCodeSelectorOkForVmx(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->ds)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->es)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->fs)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->gs)
                        || !hmR3IsStackSelectorOkForVmx(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if (pCtx->tr.Sel > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if (pCtx->ldtr.Sel > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx))  /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
                /* Windows XP; switch to protected mode; all selectors are marked not present in the
                 * hidden registers (possible recompiler bug; see load_seg_vm) */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /* Windows XP: possible same as above, but new recompiler requires new heuristics?
                   VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t mask;

        /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;
        /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */
        mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            mask &= ~(X86_CR0_PG|X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & mask) != mask)
            return false;

        /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & mask) != 0)
            return false;

        /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & mask) != mask)
            return false;

        /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
+
+
+/**
+ * Checks if we need to reschedule due to VMM device heap changes.
+ *
+ * @returns true if a reschedule is required, otherwise false.
+ * @param pVM Pointer to the VM.
+ * @param pCtx VM execution context.
+ */
+VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
+{
+ /*
+ * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
+ * when the unrestricted guest execution feature is missing (VT-x only).
+ */
+ if ( pVM->hm.s.vmx.fEnabled
+ && !pVM->hm.s.vmx.fUnrestrictedGuest
+ && CPUMIsGuestInRealModeEx(pCtx)
+ && !PDMVmmDevHeapIsEnabled(pVM))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+
/**
 * Notification from EM about a rescheduling into hardware assisted execution
 * mode.
 *
 * Flags the entire guest state as changed (HM_CHANGED_ALL_GUEST) so it gets
 * re-synced on the next hardware-assisted run.
 *
 * @param   pVCpu   Pointer to the current VMCPU.
 */
VMMR3_INT_DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu)
{
    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
}
+
+
/**
 * Notification from EM about returning from instruction emulation (REM / EM).
 *
 * Flags the entire guest state as changed (HM_CHANGED_ALL_GUEST) since the
 * emulator may have modified anything.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3_INT_DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu)
{
    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
}
+
+
/**
 * Checks if we are currently using hardware accelerated raw mode.
 *
 * @returns true if hardware acceleration is being used, otherwise false.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu)
{
    return pVCpu->hm.s.fActive; /* Set/cleared by HMR3CanExecuteGuest. */
}
+
+
/**
 * External interface for querying whether hardware accelerated raw mode is
 * enabled.
 *
 * @returns true if VT-x or AMD-V is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 * @sa      HMIsEnabled, HMIsEnabledNotMacro.
 */
VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
}
+
+
+/**
+ * External interface for querying whether VT-x is being used.
+ *
+ * @returns true if VT-x is being used, otherwise false.
+ * @param pUVM The user mode VM handle.
+ * @sa HMR3IsSvmEnabled, HMIsEnabled
+ */
+VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return pVM->hm.s.vmx.fEnabled
+ && pVM->hm.s.vmx.fSupported
+ && pVM->fHMEnabled;
+}
+
+
/**
 * External interface for querying whether AMD-V is being used.
 *
 * @returns true if AMD-V is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 * @sa      HMR3IsVmxEnabled, HMIsEnabled
 */
VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->hm.s.svm.fEnabled
        && pVM->hm.s.svm.fSupported
        && pVM->fHMEnabled;
}
+
+
/**
 * Checks if we are currently using nested paging.
 *
 * @returns true if nested paging is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 */
VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->hm.s.fNestedPaging;
}
+
+
/**
 * Checks if we are currently using VPID in VT-x mode.
 *
 * @returns true if VPID is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 */
VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->hm.s.vmx.fVpid;
}
+
+
/**
 * Checks if we are currently using VT-x unrestricted execution,
 * aka UX.
 *
 * @returns true if UX is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 */
VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->hm.s.vmx.fUnrestrictedGuest;
}
+
+
/**
 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
 *
 * @returns true if an internal event is pending, otherwise false.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
{
    return HMIsEnabled(pVCpu->pVMR3) && pVCpu->hm.s.Event.fPending;
}
+
+
/**
 * Checks if the VMX-preemption timer is being used.
 *
 * @returns true if the VMX-preemption timer is being used, otherwise false.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.vmx.fEnabled
        && pVM->hm.s.vmx.fUsePreemptTimer;
}
+
+
/**
 * Restart an I/O instruction that was refused in ring-0.
 *
 * Replays the port read/write recorded in pVCpu->hm.s.PendingIO provided the
 * guest RIP still matches, advancing RIP past the instruction on success and
 * checking for armed I/O breakpoints afterwards.
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VERR_NOT_FOUND if no pending I/O instruction.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType;

    /* Consume the pending I/O record; it is only valid for one restart. */
    pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID;

    /* Nothing to do if there's no pending I/O or the guest has moved on. */
    if (   pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip
        || enmType == HMPENDINGIO_INVALID)
        return VERR_NOT_FOUND;

    VBOXSTRICTRC rcStrict;
    switch (enmType)
    {
        case HMPENDINGIO_PORT_READ:
        {
            uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal;
            uint32_t u32Val  = 0;

            rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
                                     &u32Val,
                                     pVCpu->hm.s.PendingIO.s.Port.cbSize);
            if (IOM_SUCCESS(rcStrict))
            {
                /* Write back to the EAX register. */
                pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
            }
            break;
        }

        case HMPENDINGIO_PORT_WRITE:
            rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
                                      pCtx->eax & pVCpu->hm.s.PendingIO.s.Port.uAndVal,
                                      pVCpu->hm.s.PendingIO.s.Port.cbSize);
            if (IOM_SUCCESS(rcStrict))
                pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
            break;

        default:
            AssertLogRelFailedReturn(VERR_HM_UNKNOWN_IO_INSTRUCTION);
    }

    if (IOM_SUCCESS(rcStrict))
    {
        /*
         * Check for I/O breakpoints.
         */
        uint32_t const uDr7 = pCtx->dr[7];
        if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
                && X86_DR7_ANY_RW_IO(uDr7)
                && (pCtx->cr4 & X86_CR4_DE))
            || DBGFBpIsHwIoArmed(pVM))
        {
            VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, pVCpu->hm.s.PendingIO.s.Port.uPort,
                                                   pVCpu->hm.s.PendingIO.s.Port.cbSize);
            if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
                rcStrict2 = TRPMAssertTrap(pVCpu, X86_XCPT_DB, TRPM_TRAP);
            /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
            else if (rcStrict2 != VINF_SUCCESS && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                rcStrict = rcStrict2;
        }
    }
    return rcStrict;
}
+
+
/**
 * Check fatal VT-x/AMD-V error and produce some meaningful
 * log release message.
 *
 * Dumps per-VCPU diagnostic details for the known hardware-virtualization
 * failure codes; unknown status codes are silently ignored.
 *
 * @param   pVM         Pointer to the VM.
 * @param   iStatusCode VBox status code.
 */
VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        switch (iStatusCode)
        {
            /* Nothing useful to log per-VCPU for this one. */
            case VERR_VMX_INVALID_VMCS_FIELD:
                break;

            case VERR_VMX_INVALID_VMCS_PTR:
                LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
                LogRel(("HM: CPU[%u] Current pointer      %#RGp vs %#RGp\n", i, pVCpu->hm.s.vmx.LastError.u64VMCSPhys,
                        pVCpu->hm.s.vmx.HCPhysVmcs));
                LogRel(("HM: CPU[%u] Current VMCS version %#x\n", i, pVCpu->hm.s.vmx.LastError.u32VMCSRevision));
                LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                LogRel(("HM: CPU[%u] Current Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
                break;

            case VERR_VMX_UNABLE_TO_START_VM:
                LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
                LogRel(("HM: CPU[%u] Instruction error    %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
                LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));

                /* VMLAUNCH/VMRESUME state errors: log the host CPU migration info. */
                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
                {
                    LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                    LogRel(("HM: CPU[%u] Current Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
                }
                /* Invalid control fields: dump the cached VMCS control values. */
                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
                {
                    LogRel(("HM: CPU[%u] PinCtls              %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
                    LogRel(("HM: CPU[%u] ProcCtls             %#RX32\n", i, pVCpu->hm.s.vmx.u32ProcCtls));
                    LogRel(("HM: CPU[%u] ProcCtls2            %#RX32\n", i, pVCpu->hm.s.vmx.u32ProcCtls2));
                    LogRel(("HM: CPU[%u] EntryCtls            %#RX32\n", i, pVCpu->hm.s.vmx.u32EntryCtls));
                    LogRel(("HM: CPU[%u] ExitCtls             %#RX32\n", i, pVCpu->hm.s.vmx.u32ExitCtls));
                    LogRel(("HM: CPU[%u] MSRBitmapPhys        %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
                    LogRel(("HM: CPU[%u] GuestMSRPhys         %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
                    LogRel(("HM: CPU[%u] HostMsrPhys          %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr));
                    LogRel(("HM: CPU[%u] cGuestMSRs           %u\n", i, pVCpu->hm.s.vmx.cGuestMsrs));
#endif
                }
                /** @todo Log VM-entry event injection control fields
                 *        VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
                 *        and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
                break;

            /* Nothing useful to log per-VCPU for this one either. */
            case VERR_VMX_INVALID_VMXON_PTR:
                break;

            case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
            case VERR_VMX_INVALID_GUEST_STATE:
            case VERR_VMX_UNEXPECTED_EXIT:
            case VERR_SVM_UNKNOWN_EXIT:
            case VERR_SVM_UNEXPECTED_EXIT:
            case VERR_SVM_UNEXPECTED_PATCH_TYPE:
            case VERR_SVM_UNEXPECTED_XCPT_EXIT:
            case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
            {
                LogRel(("HM: CPU[%u] HM error         %#x (%u)\n", i, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
                break;
            }
            /* Note: no default case -- other status codes are intentionally ignored. */
        }
    }

    if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
    {
        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1));
        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0));
    }
}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pSSM SSM operation handle.
+ */
+static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ int rc;
+
+ Log(("hmR3Save:\n"));
+
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ /*
+ * Save the basic bits - fortunately all the other things can be resynced on load.
+ */
+ rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.u32ErrCode);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.u64IntInfo);
+ AssertRCReturn(rc, rc);
+ /** @todo Shouldn't we be saving GCPtrFaultAddress too? */
+
+ /** @todo We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
+ * perhaps not even that (the initial value of @c true is safe. */
+ uint32_t u32Dummy = PGMMODE_REAL;
+ rc = SSMR3PutU32(pSSM, u32Dummy);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutU32(pSSM, u32Dummy);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutU32(pSSM, u32Dummy);
+ AssertRCReturn(rc, rc);
+ }
+
+#ifdef VBOX_HM_WITH_GUEST_PATCHING
+ rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
+ AssertRCReturn(rc, rc);
+
+ /* Store all the guest patch records too. */
+ rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
+ AssertRCReturn(rc, rc);
+
+ for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+
+ rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->cbOp);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
+ AssertRCReturn(rc, rc);
+
+ AssertCompileSize(HMTPRINSTR, 4);
+ rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3PutU32(pSSM, pPatch->cFaults);
+ AssertRCReturn(rc, rc);
+ }
+#endif
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Execute state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pSSM SSM operation handle.
+ * @param uVersion Data layout version.
+ * @param uPass The data pass.
+ */
+static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ int rc;
+
+ Log(("hmR3Load:\n"));
+ Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
+
+ /*
+ * Validate version.
+ */
+ if ( uVersion != HM_SSM_VERSION
+ && uVersion != HM_SSM_VERSION_NO_PATCHING
+ && uVersion != HM_SSM_VERSION_2_0_X)
+ {
+ AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+ }
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntInfo);
+ AssertRCReturn(rc, rc);
+
+ if (uVersion >= HM_SSM_VERSION_NO_PATCHING)
+ {
+ uint32_t val;
+ /** @todo See note in hmR3Save(). */
+ rc = SSMR3GetU32(pSSM, &val);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetU32(pSSM, &val);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetU32(pSSM, &val);
+ AssertRCReturn(rc, rc);
+ }
+ }
+#ifdef VBOX_HM_WITH_GUEST_PATCHING
+ if (uVersion > HM_SSM_VERSION_NO_PATCHING)
+ {
+ rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
+ AssertRCReturn(rc, rc);
+ rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
+ AssertRCReturn(rc, rc);
+
+ /* Fetch all TPR patch records. */
+ rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
+ AssertRCReturn(rc, rc);
+
+ for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
+ {
+ PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+
+ rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
+ AssertRCReturn(rc, rc);
+
+ if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
+ pVM->hm.s.fTPRPatchingActive = true;
+
+ Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
+ AssertRCReturn(rc, rc);
+
+ rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
+ AssertRCReturn(rc, rc);
+
+ Log(("hmR3Load: patch %d\n", i));
+ Log(("Key = %x\n", pPatch->Core.Key));
+ Log(("cbOp = %d\n", pPatch->cbOp));
+ Log(("cbNewOp = %d\n", pPatch->cbNewOp));
+ Log(("type = %d\n", pPatch->enmType));
+ Log(("srcop = %d\n", pPatch->uSrcOperand));
+ Log(("dstop = %d\n", pPatch->uDstOperand));
+ Log(("cFaults = %d\n", pPatch->cFaults));
+ Log(("target = %x\n", pPatch->pJumpTarget));
+ rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+ AssertRC(rc);
+ }
+ }
+#endif
+
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/HWACCM.cpp b/src/VBox/VMM/VMMR3/HWACCM.cpp
deleted file mode 100644
index 4fb089b5..00000000
--- a/src/VBox/VMM/VMMR3/HWACCM.cpp
+++ /dev/null
@@ -1,2935 +0,0 @@
-/* $Id: HWACCM.cpp $ */
-/** @file
- * HWACCM - Intel/AMD VM Hardware Support Manager.
- */
-
-/*
- * Copyright (C) 2006-2012 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-/*******************************************************************************
-* Header Files *
-*******************************************************************************/
-#define LOG_GROUP LOG_GROUP_HWACCM
-#include <VBox/vmm/cpum.h>
-#include <VBox/vmm/stam.h>
-#include <VBox/vmm/mm.h>
-#include <VBox/vmm/pdmapi.h>
-#include <VBox/vmm/pgm.h>
-#include <VBox/vmm/ssm.h>
-#include <VBox/vmm/trpm.h>
-#include <VBox/vmm/dbgf.h>
-#include <VBox/vmm/iom.h>
-#include <VBox/vmm/patm.h>
-#include <VBox/vmm/csam.h>
-#include <VBox/vmm/selm.h>
-#ifdef VBOX_WITH_REM
-# include <VBox/vmm/rem.h>
-#endif
-#include <VBox/vmm/hwacc_vmx.h>
-#include <VBox/vmm/hwacc_svm.h>
-#include "HWACCMInternal.h"
-#include <VBox/vmm/vm.h>
-#include <VBox/err.h>
-#include <VBox/param.h>
-
-#include <iprt/assert.h>
-#include <VBox/log.h>
-#include <iprt/asm.h>
-#include <iprt/asm-amd64-x86.h>
-#include <iprt/string.h>
-#include <iprt/env.h>
-#include <iprt/thread.h>
-
-/*******************************************************************************
-* Global Variables *
-*******************************************************************************/
-#ifdef VBOX_WITH_STATISTICS
-# define EXIT_REASON(def, val, str) #def " - " #val " - " str
-# define EXIT_REASON_NIL() NULL
-/** Exit reason descriptions for VT-x, used to describe statistics. */
-static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
-{
- EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
- EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
- EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
- EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
- EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
- EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
- EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
- EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
- EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
- EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
- EXIT_REASON(VMX_EXIT_INVLPG , 14, "Guest software attempted to execute INVLPG."),
- EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
- EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
- EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
- EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
- EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
- EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
- EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
- EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
- EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
- EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
- EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
- EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
- EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
- EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
- EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
- EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
- EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
- EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
- EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
- EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
- EXIT_REASON(VMX_EXIT_MTF , 37, "Monitor Trap Flag."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
- EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
- EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
- EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
- EXIT_REASON_NIL(),
- EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
- EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
- EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
- EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
- EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
- EXIT_REASON(VMX_EXIT_RDTSCP , 51, "Guest software attempted to execute RDTSCP."),
- EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
- EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
- EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
- EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
- EXIT_REASON_NIL()
-};
-/** Exit reason descriptions for AMD-V, used to describe statistics. */
-static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
-{
- EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
- EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
- EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
- EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
- EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
- EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
- EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
- EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
- EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
- EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
- EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
- EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
- EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
- EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
- EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
- EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
- EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
- EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
- EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
- EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
- EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
- EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
- EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
- EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
- EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
- EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
- EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
- EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
- EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
- EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
- EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
- EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
- EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
- EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
- EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
- EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
- EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
- EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
- EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
- EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
- EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
- EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
- EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
- EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11"),
- EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
- EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
- EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
- EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
- EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
- EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
- EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
- EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
- EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
- EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
- EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
- EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
- EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
- EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
- EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
- EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
- EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
- EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
- EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
- EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 22 (0x15)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
- EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
- EXIT_REASON(SVM_EXIT_INTR , 96, "Physical maskable interrupt."),
- EXIT_REASON(SVM_EXIT_NMI , 97, "Physical non-maskable interrupt."),
- EXIT_REASON(SVM_EXIT_SMI , 98, "System management interrupt."),
- EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal."),
- EXIT_REASON(SVM_EXIT_VINTR ,100, "Virtual interrupt."),
- EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
- EXIT_REASON(SVM_EXIT_IDTR_READ ,102, "Read IDTR"),
- EXIT_REASON(SVM_EXIT_GDTR_READ ,103, "Read GDTR"),
- EXIT_REASON(SVM_EXIT_LDTR_READ ,104, "Read LDTR."),
- EXIT_REASON(SVM_EXIT_TR_READ ,105, "Read TR."),
- EXIT_REASON(SVM_EXIT_TR_READ ,106, "Write IDTR."),
- EXIT_REASON(SVM_EXIT_TR_READ ,107, "Write GDTR."),
- EXIT_REASON(SVM_EXIT_TR_READ ,108, "Write LDTR."),
- EXIT_REASON(SVM_EXIT_TR_READ ,109, "Write TR."),
- EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
- EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
- EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
- EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
- EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
- EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
- EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
- EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
- EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
- EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
- EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
- EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
- EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
- EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
- EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
- EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
- EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt"),
- EXIT_REASON(SVM_EXIT_SHUTDOWN ,127, "Shutdown."),
- EXIT_REASON(SVM_EXIT_VMRUN ,128, "VMRUN instruction."),
- EXIT_REASON(SVM_EXIT_VMMCALL ,129, "VMCALL instruction."),
- EXIT_REASON(SVM_EXIT_VMLOAD ,130, "VMLOAD instruction."),
- EXIT_REASON(SVM_EXIT_VMSAVE ,131, "VMSAVE instruction."),
- EXIT_REASON(SVM_EXIT_STGI ,132, "STGI instruction."),
- EXIT_REASON(SVM_EXIT_CLGI ,133, "CLGI instruction."),
- EXIT_REASON(SVM_EXIT_SKINIT ,134, "SKINIT instruction."),
- EXIT_REASON(SVM_EXIT_RDTSCP ,135, "RDTSCP instruction."),
- EXIT_REASON(SVM_EXIT_ICEBP ,136, "ICEBP instruction."),
- EXIT_REASON(SVM_EXIT_WBINVD ,137, "WBINVD instruction."),
- EXIT_REASON(SVM_EXIT_MONITOR ,138, "MONITOR instruction."),
- EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction unconditional."),
- EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
- EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
- EXIT_REASON_NIL()
-};
-# undef EXIT_REASON
-# undef EXIT_REASON_NIL
-#endif /* VBOX_WITH_STATISTICS */
-
-/*******************************************************************************
-* Internal Functions *
-*******************************************************************************/
-static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
-static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
-static int hwaccmR3InitCPU(PVM pVM);
-static int hwaccmR3InitFinalizeR0(PVM pVM);
-static int hwaccmR3TermCPU(PVM pVM);
-
-
-/**
- * Initializes the HWACCM.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(int) HWACCMR3Init(PVM pVM)
-{
- LogFlow(("HWACCMR3Init\n"));
-
- /*
- * Assert alignment and sizes.
- */
- AssertCompileMemberAlignment(VM, hwaccm.s, 32);
- AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
-
- /* Some structure checks. */
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
-
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.TR) == 0x490, ("guest.TR offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.TR)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8CPL) == 0x4CB, ("guest.u8CPL offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8CPL)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64EFER) == 0x4D0, ("guest.u64EFER offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64EFER)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR4) == 0x548, ("guest.u64CR4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR4)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RIP) == 0x578, ("guest.u64RIP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RIP)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RSP) == 0x5D8, ("guest.u64RSP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RSP)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR2) == 0x640, ("guest.u64CR2 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR2)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64GPAT) == 0x668, ("guest.u64GPAT offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64GPAT)));
- AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO) == 0x690, ("guest.u64LASTEXCPTO offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO)));
- AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
-
-
- /*
- * Register the saved state data unit.
- */
- int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
- NULL, NULL, NULL,
- NULL, hwaccmR3Save, NULL,
- NULL, hwaccmR3Load, NULL);
- if (RT_FAILURE(rc))
- return rc;
-
- /* Misc initialisation. */
- pVM->hwaccm.s.vmx.fSupported = false;
- pVM->hwaccm.s.svm.fSupported = false;
- pVM->hwaccm.s.vmx.fEnabled = false;
- pVM->hwaccm.s.svm.fEnabled = false;
-
- pVM->hwaccm.s.fNestedPaging = false;
- pVM->hwaccm.s.fLargePages = false;
-
- /* Disabled by default. */
- pVM->fHWACCMEnabled = false;
-
- /*
- * Check CFGM options.
- */
- PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
- PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
- /* Nested paging: disabled by default. */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
- AssertRC(rc);
-
- /* Large pages: disabled by default. */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hwaccm.s.fLargePages, false);
- AssertRC(rc);
-
- /* VT-x VPID: disabled by default. */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
- AssertRC(rc);
-
- /* HWACCM support must be explicitely enabled in the configuration file. */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
- AssertRC(rc);
-
- /* TPR patching for 32 bits (Windows) guests with IO-APIC: disabled by default. */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
- AssertRC(rc);
-
-#ifdef RT_OS_DARWIN
- if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
-#else
- if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
-#endif
- {
- AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
- VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
- return VERR_HWACCM_CONFIG_MISMATCH;
- }
-
- if (VMMIsHwVirtExtForced(pVM))
- pVM->fHWACCMEnabled = true;
-
-#if HC_ARCH_BITS == 32
- /*
- * 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
- * (To use the default, don't set 64bitEnabled in CFGM.)
- */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
- AssertLogRelRCReturn(rc, rc);
- if (pVM->hwaccm.s.fAllow64BitGuests)
- {
-# ifdef RT_OS_DARWIN
- if (!VMMIsHwVirtExtForced(pVM))
-# else
- if (!pVM->hwaccm.s.fAllowed)
-# endif
- return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
- }
-#else
- /*
- * On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
- * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.)*
- */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
- AssertLogRelRCReturn(rc, rc);
-#endif
-
-
- /*
- * Determine the init method for AMD-V and VT-x; either one global init for each host CPU
- * or local init each time we wish to execute guest code.
- *
- * Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
- */
- rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
-#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
- false
-#else
- true
-#endif
- );
-
- /* Max number of resume loops. */
- rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
- AssertRC(rc);
-
- return rc;
-}
-
-
-/**
- * Initializes the per-VCPU HWACCM.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-static int hwaccmR3InitCPU(PVM pVM)
-{
- LogFlow(("HWACCMR3InitCPU\n"));
-
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- pVCpu->hwaccm.s.fActive = false;
- }
-
-#ifdef VBOX_WITH_STATISTICS
- STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
- STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
- STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
- STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
-
- /*
- * Statistics.
- */
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
- int rc;
-
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
- "/PROF/HWACCM/CPU%d/Poke", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
- "/PROF/HWACCM/CPU%d/PokeWait", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
- "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
- "/PROF/HWACCM/CPU%d/SwitchToGC", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
- "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
- "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
- AssertRC(rc);
-# if 1 /* temporary for tracking down darwin holdup. */
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
- "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
- "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
- "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
- AssertRC(rc);
-# endif
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
- "/PROF/HWACCM/CPU%d/InGC", i);
- AssertRC(rc);
-
-# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
- "/PROF/HWACCM/CPU%d/Switcher3264", i);
- AssertRC(rc);
-# endif
-
-# define HWACCM_REG_COUNTER(a, b) \
- rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Profiling of vmlaunch", b, i); \
- AssertRC(rc);
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPFEM, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF-EM");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestBP, "/HWACCM/CPU%d/Exit/Trap/Gst/#BP");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXF, "/HWACCM/CPU%d/Exit/Trap/Gst/#XF");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXcpUnk, "/HWACCM/CPU%d/Exit/Trap/Gst/Other");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvlpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtscp, "/HWACCM/CPU%d/Exit/Instr/Rdtscp");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor, "/HWACCM/CPU%d/Exit/Instr/Monitor");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMTF, "/HWACCM/CPU%d/Exit/MonitorTrapFlag");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage, "/HWACCM/CPU%d/Flush/Page");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB, "/HWACCM/CPU%d/Flush/TLB");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");
-
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadMinimal, "/HWACCM/CPU%d/Load/Minimal");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadFull, "/HWACCM/CPU%d/Load/Full");
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFpu64SwitchBack, "/HWACCM/CPU%d/Switch64/Fpu");
- HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDebug64SwitchBack, "/HWACCM/CPU%d/Switch64/Debug");
-#endif
-
- for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite); j++)
- {
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
- "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
- "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
- AssertRC(rc);
- }
-
-#undef HWACCM_REG_COUNTER
-
- pVCpu->hwaccm.s.paStatExitReason = NULL;
-
- rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
- AssertRC(rc);
- if (RT_SUCCESS(rc))
- {
- const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
- for (int j = 0; j < MAX_EXITREASON_STAT; j++)
- {
- if (papszDesc[j])
- {
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
- papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
- AssertRC(rc);
- }
- }
- rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
- AssertRC(rc);
- }
- pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
-# else
- Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
-# endif
-
- rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
- AssertRCReturn(rc, rc);
- pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
-# else
- Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
-# endif
- for (unsigned j = 0; j < 255; j++)
- STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
- (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
-
- }
-#endif /* VBOX_WITH_STATISTICS */
-
-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- /* Magic marker for searching in crash dumps. */
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
- strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
- pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
- }
-#endif
- return VINF_SUCCESS;
-}
-
-
-/**
- * Called when an init phase has completed.
- *
- * Dispatches per-phase hardware-acceleration initialization; phases other
- * than ring-3 and ring-0 completion require no action here.
- *
- * @returns VBox status code.
- * @param   pVM                 The VM.
- * @param   enmWhat             The phase that completed.
- */
-VMMR3_INT_DECL(int) HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
-{
-    switch (enmWhat)
-    {
-        case VMINITCOMPLETED_RING3:
-            /* Ring-3 up: per-VCPU init (statistics registration etc.). */
-            return hwaccmR3InitCPU(pVM);
-        case VMINITCOMPLETED_RING0:
-            /* Ring-0 up: probe/enable VT-x or AMD-V on the host. */
-            return hwaccmR3InitFinalizeR0(pVM);
-        default:
-            return VINF_SUCCESS;
-    }
-}
-
-
-/**
- * Turns off normal raw mode features.
- *
- * Called when hardware virtualization (VT-x/AMD-V) takes over execution,
- * making the raw-mode helpers (patching, scanning, monitoring, the world
- * switcher and hypervisor mappings) unnecessary.
- *
- * @param   pVM     Pointer to the VM.
- */
-static void hwaccmR3DisableRawMode(PVM pVM)
-{
-    /* Disable PATM & CSAM. */
-    PATMR3AllowPatching(pVM, false);
-    CSAMDisableScanning(pVM);
-
-    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
-    SELMR3DisableMonitoring(pVM);
-    TRPMR3DisableMonitoring(pVM);
-
-    /* Disable the switcher code (safety precaution). */
-    VMMR3DisableSwitcher(pVM);
-
-    /* Disable mapping of the hypervisor into the shadow page table. */
-    PGMR3MappingsDisable(pVM);
-
-    /* Reinit the paging mode to force the new shadow mode. */
-    for (VMCPUID i = 0; i < pVM->cCpus; i++)
-    {
-        PVMCPU pVCpu = &pVM->aCpus[i];
-
-        PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
-    }
-}
-
-
-/**
- * Initialize VT-x or AMD-V.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-static int hwaccmR3InitFinalizeR0(PVM pVM)
-{
- int rc;
-
- /*
- * Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
- * is already using AMD-V.
- */
- if ( !pVM->hwaccm.s.vmx.fSupported
- && !pVM->hwaccm.s.svm.fSupported
- && pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
- && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
- {
- LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
- pVM->hwaccm.s.svm.fSupported = true;
- pVM->hwaccm.s.svm.fIgnoreInUseError = true;
- }
- else
- if ( !pVM->hwaccm.s.vmx.fSupported
- && !pVM->hwaccm.s.svm.fSupported)
- {
- LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
- LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
-
- if (VMMIsHwVirtExtForced(pVM))
- {
- switch (pVM->hwaccm.s.lLastError)
- {
- case VERR_VMX_NO_VMX:
- return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
- case VERR_VMX_IN_VMX_ROOT_MODE:
- return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
- case VERR_SVM_IN_USE:
- return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
- case VERR_SVM_NO_SVM:
- return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
- case VERR_SVM_DISABLED:
- return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
- default:
- return pVM->hwaccm.s.lLastError;
- }
- }
- return VINF_SUCCESS;
- }
-
- if (pVM->hwaccm.s.vmx.fSupported)
- {
- rc = SUPR3QueryVTxSupported();
- if (RT_FAILURE(rc))
- {
-#ifdef RT_OS_LINUX
- LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
-#else
- LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
-#endif
- if ( pVM->cCpus > 1
- || VMMIsHwVirtExtForced(pVM))
- return rc;
-
- /* silently fall back to raw mode */
- return VINF_SUCCESS;
- }
- }
-
- if (!pVM->hwaccm.s.fAllowed)
- return VINF_SUCCESS; /* nothing to do */
-
- /* Enable VT-x or AMD-V on all host CPUs. */
- rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
- if (RT_FAILURE(rc))
- {
- LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
- return rc;
- }
- Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
-
- pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
- /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
- if (!pVM->hwaccm.s.fHasIoApic)
- {
- Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
- pVM->hwaccm.s.fTRPPatchingAllowed = false;
- }
-
- bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
- if (pVM->hwaccm.s.vmx.fSupported)
- {
- Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
-
- if ( pVM->hwaccm.s.fInitialized == false
- && pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
- {
- uint64_t val;
- RTGCPHYS GCPhys = 0;
-
- LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
- LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
- LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
- LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
- LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
- LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
- LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
- LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
-
- LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
- val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
- val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
-
- LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
- val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
-
- val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
-
- if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
- {
- LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
- val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
-
- val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
- if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
- LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
- }
-
- LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
- val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
- val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
-
- LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
- val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
- val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
- if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
- LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
-
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
- {
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
-
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS\n"));
- if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
- LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
- }
-
- LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
- if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc) == pVM->hwaccm.s.vmx.cPreemptTimerShift)
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
- else
- {
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",
- MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
- }
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
- LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
-
- LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
- LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
- LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
- LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
- LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
-
- LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
-
- /* Paranoia */
- AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
-
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
- LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
- }
-
- if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
- pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
-
- if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
- pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
-
- /*
- * Disallow RDTSCP in the guest if there is no secondary process-based VM execution controls as otherwise
- * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
- * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
- */
- if (!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
- && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
- {
- CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
- }
-
- /* Unrestricted guest execution relies on EPT. */
- if ( pVM->hwaccm.s.fNestedPaging
- && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
- {
- pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
- }
-
- /* Only try once. */
- pVM->hwaccm.s.fInitialized = true;
-
- if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
- {
- /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
- rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
- if (RT_SUCCESS(rc))
- {
- /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
- ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
- pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
- /* Bit set to 0 means redirection enabled. */
- memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
- /* Allow all port IO, so the VT-x IO intercepts do their job. */
- memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
- *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
-
- /*
- * Construct a 1024 element page directory with 4 MB pages for the identity mapped page table used in
- * real and protected mode without paging with EPT.
- */
- pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
- for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
- {
- pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
- pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
- }
-
- /* We convert it here every time as pci regions could be reconfigured. */
- rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
- AssertRC(rc);
- LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
-
- rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
- AssertRC(rc);
- LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
- }
- else
- {
- LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
- pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
- pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
- }
- }
-
- rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
- AssertRC(rc);
- if (rc == VINF_SUCCESS)
- {
- pVM->fHWACCMEnabled = true;
- pVM->hwaccm.s.vmx.fEnabled = true;
- hwaccmR3DisableRawMode(pVM);
-
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
-#ifdef VBOX_ENABLE_64_BITS_GUESTS
- if (pVM->hwaccm.s.fAllow64BitGuests)
- {
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
- }
- else
- /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher) */
- /* Todo: this needs to be fixed properly!! */
- if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
- && (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-
- LogRel((pVM->hwaccm.s.fAllow64BitGuests
- ? "HWACCM: 32-bit and 64-bit guests supported.\n"
- : "HWACCM: 32-bit guests supported.\n"));
-#else
- LogRel(("HWACCM: 32-bit guests supported.\n"));
-#endif
- LogRel(("HWACCM: VMX enabled!\n"));
- if (pVM->hwaccm.s.fNestedPaging)
- {
- LogRel(("HWACCM: Enabled nested paging\n"));
- LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
- if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_SINGLE_CONTEXT)
- LogRel(("HWACCM: enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
- else if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_ALL_CONTEXTS)
- LogRel(("HWACCM: enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
- else if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_NOT_SUPPORTED)
- LogRel(("HWACCM: enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
- else
- LogRel(("HWACCM: enmFlushEPT = %d\n", pVM->hwaccm.s.vmx.enmFlushEPT));
-
- if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
- LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));
-
-#if HC_ARCH_BITS == 64
- if (pVM->hwaccm.s.fLargePages)
- {
- /* Use large (2 MB) pages for our EPT PDEs where possible. */
- PGMSetLargePageUsage(pVM, true);
- LogRel(("HWACCM: Large page support enabled!\n"));
- }
-#endif
- }
- else
- Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);
-
- if (pVM->hwaccm.s.vmx.fVPID)
- {
- LogRel(("HWACCM: Enabled VPID\n"));
- if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_INDIV_ADDR)
- LogRel(("HWACCM: enmFlushVPID = VMX_FLUSH_VPID_INDIV_ADDR\n"));
- else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT)
- LogRel(("HWACCM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
- else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_ALL_CONTEXTS)
- LogRel(("HWACCM: enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
- else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
- LogRel(("HWACCM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
- else
- LogRel(("HWACCM: enmFlushVPID = %d\n", pVM->hwaccm.s.vmx.enmFlushVPID));
- }
- else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_NOT_SUPPORTED)
- LogRel(("HWACCM: Ignoring VPID capabilities of CPU.\n"));
-
- /* TPR patching status logging. */
- if (pVM->hwaccm.s.fTRPPatchingAllowed)
- {
- if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
- && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
- {
- pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
- LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
- }
- else
- {
- uint32_t u32Eax, u32Dummy;
-
- /* TPR patching needs access to the MSR_K8_LSTAR msr. */
- ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
- if ( u32Eax < 0x80000001
- || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
- {
- pVM->hwaccm.s.fTRPPatchingAllowed = false;
- LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
- }
- }
- }
- LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
-
- /*
- * Check for preemption timer config override and log the state of it.
- */
- if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
- {
- PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWACCM");
- int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hwaccm.s.vmx.fUsePreemptTimer, true);
- AssertLogRelRC(rc2);
- }
- if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
- LogRel(("HWACCM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hwaccm.s.vmx.cPreemptTimerShift));
- }
- else
- {
- LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
- LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
- pVM->fHWACCMEnabled = false;
- }
- }
- }
- else
- if (pVM->hwaccm.s.svm.fSupported)
- {
- Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
-
- if (pVM->hwaccm.s.fInitialized == false)
- {
- /* Erratum 170 which requires a forced TLB flush for each world switch:
- * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
- *
- * All BH-G1/2 and DH-G1/2 models include a fix:
- * Athlon X2: 0x6b 1/2
- * 0x68 1/2
- * Athlon 64: 0x7f 1
- * 0x6f 2
- * Sempron: 0x7f 1/2
- * 0x6f 2
- * 0x6c 2
- * 0x7c 2
- * Turion 64: 0x68 2
- *
- */
- uint32_t u32Dummy;
- uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
- ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
- u32BaseFamily= (u32Version >> 8) & 0xf;
- u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
- u32Model = ((u32Version >> 4) & 0xf);
- u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
- u32Stepping = u32Version & 0xf;
- if ( u32Family == 0xf
- && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
- && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
- {
- LogRel(("HWACMM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
- }
-
- LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
- LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
- LogRel(("HWACCM: AMD HWCR MSR = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
- LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
- LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
- LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));
- static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
- {
-#define FLAG_NAME(a_Define) { a_Define, #a_Define }
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
- FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
-#undef FLAG_NAME
- };
- uint32_t fSvmFeatures = pVM->hwaccm.s.svm.u32Features;
- for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
- if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
- {
- LogRel(("HWACCM: %s\n", s_aSvmFeatures[i].pszName));
- fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
- }
- if (fSvmFeatures)
- for (unsigned iBit = 0; iBit < 32; iBit++)
- if (RT_BIT_32(iBit) & fSvmFeatures)
- LogRel(("HWACCM: Reserved bit %u\n", iBit));
-
- /* Only try once. */
- pVM->hwaccm.s.fInitialized = true;
-
- if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
- pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
-
- rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
- AssertRC(rc);
- if (rc == VINF_SUCCESS)
- {
- pVM->fHWACCMEnabled = true;
- pVM->hwaccm.s.svm.fEnabled = true;
-
- if (pVM->hwaccm.s.fNestedPaging)
- {
- LogRel(("HWACCM: Enabled nested paging\n"));
-#if HC_ARCH_BITS == 64
- if (pVM->hwaccm.s.fLargePages)
- {
- /* Use large (2 MB) pages for our nested paging PDEs where possible. */
- PGMSetLargePageUsage(pVM, true);
- LogRel(("HWACCM: Large page support enabled!\n"));
- }
-#endif
- }
-
- hwaccmR3DisableRawMode(pVM);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
-#ifdef VBOX_ENABLE_64_BITS_GUESTS
- if (pVM->hwaccm.s.fAllow64BitGuests)
- {
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
- }
- else
- /* Turn on NXE if PAE has been enabled. */
- if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
- CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-#endif
-
- LogRel((pVM->hwaccm.s.fAllow64BitGuests
- ? "HWACCM: 32-bit and 64-bit guest supported.\n"
- : "HWACCM: 32-bit guest supported.\n"));
-
- LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
- }
- else
- {
- pVM->fHWACCMEnabled = false;
- }
- }
- }
- if (pVM->fHWACCMEnabled)
- LogRel(("HWACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
- RTLogRelSetBuffering(fOldBuffered);
- return VINF_SUCCESS;
-}
-
-
-/**
- * Applies relocations to data and code managed by this
- * component. This function will be called at init and
- * whenever the VMM need to relocate it self inside the GC.
- *
- * @param pVM The VM.
- */
-VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
-{
- Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
-
- /* Fetch the current paging mode during the relocate callback during state loading. */
- if (VMR3GetState(pVM) == VMSTATE_LOADING)
- {
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- pVCpu->hwaccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
- Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
- pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
- }
- }
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
- if (pVM->fHWACCMEnabled)
- {
- int rc;
- switch (PGMGetHostMode(pVM))
- {
- case PGMMODE_32_BIT:
- pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
- break;
-
- case PGMMODE_PAE:
- case PGMMODE_PAE_NX:
- pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
- break;
-
- default:
- AssertFailed();
- break;
- }
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
- AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
-
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
- AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
-
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
- AssertReleaseMsgRC(rc, ("HWACCMSetupFPU64 -> rc=%Rrc\n", rc));
-
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
- AssertReleaseMsgRC(rc, ("HWACCMSetupDebug64 -> rc=%Rrc\n", rc));
-
-# ifdef DEBUG
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
- AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
-# endif
- }
-#endif
- return;
-}
-
-
-/**
- * Checks if hardware accelerated raw mode is allowed.
- *
- * @returns true if hardware acceleration is allowed, otherwise false.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
-{
- return pVM->hwaccm.s.fAllowed;
-}
-
-
-/**
- * Notification callback which is called whenever there is a chance that a CR3
- * value might have changed.
- *
- * This is called by PGM.
- *
- * @param pVM Pointer to the VM.
- * @param pVCpu Pointer to the VMCPU.
- * @param enmShadowMode New shadow paging mode.
- * @param enmGuestMode New guest paging mode.
- */
-VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
-{
- /* Ignore page mode changes during state loading. */
- if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
- return;
-
- pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
-
- if ( pVM->hwaccm.s.vmx.fEnabled
- && pVM->fHWACCMEnabled)
- {
- if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
- && enmGuestMode >= PGMMODE_PROTECTED)
- {
- PCPUMCTX pCtx;
-
- pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
- /* After a real mode switch to protected mode we must force
- CPL to 0. Our real mode emulation had to set it to 3. */
- pCtx->ss.Attr.n.u2Dpl = 0;
- }
- }
-
- if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
- {
- /* Keep track of paging mode changes. */
- pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
- pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
-
- /* Did we miss a change, because all code was executed in the recompiler? */
- if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
- {
- Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
- pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
- }
- }
-
- /* Reset the contents of the read cache. */
- PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
- for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
- pCache->Read.aFieldVal[j] = 0;
-}
-
-
-/**
- * Terminates the HWACCM.
- *
- * Termination means cleaning up and freeing all resources,
- * the VM itself is, at this point, powered off or suspended.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(int) HWACCMR3Term(PVM pVM)
-{
- if (pVM->hwaccm.s.vmx.pRealModeTSS)
- {
- PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
- pVM->hwaccm.s.vmx.pRealModeTSS = 0;
- }
- hwaccmR3TermCPU(pVM);
- return 0;
-}
-
-
-/**
- * Terminates the per-VCPU HWACCM.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-static int hwaccmR3TermCPU(PVM pVM)
-{
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
-
-#ifdef VBOX_WITH_STATISTICS
- if (pVCpu->hwaccm.s.paStatExitReason)
- {
- MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
- pVCpu->hwaccm.s.paStatExitReason = NULL;
- pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
- }
- if (pVCpu->hwaccm.s.paStatInjectedIrqs)
- {
- MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
- pVCpu->hwaccm.s.paStatInjectedIrqs = NULL;
- pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
- }
-#endif
-
-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
- pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
- pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
-#endif
- }
- return 0;
-}
-
-
-/**
- * Resets a virtual CPU.
- *
- * Used by HWACCMR3Reset and CPU hot plugging.
- *
- * @param pVCpu The CPU to reset.
- */
-VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
-{
- /* On first entry we'll sync everything. */
- pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
-
- pVCpu->hwaccm.s.vmx.cr0_mask = 0;
- pVCpu->hwaccm.s.vmx.cr4_mask = 0;
-
- pVCpu->hwaccm.s.fActive = false;
- pVCpu->hwaccm.s.Event.fPending = false;
-
- /* Reset state information for real-mode emulation in VT-x. */
- pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
- pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
- pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
-
- /* Reset the contents of the read cache. */
- PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
- for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
- pCache->Read.aFieldVal[j] = 0;
-
-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
- /* Magic marker for searching in crash dumps. */
- strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
- pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
-#endif
-}
-
-
-/**
- * The VM is being reset.
- *
- * For the HWACCM component this means that any GDT/LDT/TSS monitors
- * needs to be removed.
- *
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
-{
- LogFlow(("HWACCMR3Reset:\n"));
-
- if (pVM->fHWACCMEnabled)
- hwaccmR3DisableRawMode(pVM);
-
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- HWACCMR3ResetCpu(pVCpu);
- }
-
- /* Clear all patch information. */
- pVM->hwaccm.s.pGuestPatchMem = 0;
- pVM->hwaccm.s.pFreeGuestPatchMem = 0;
- pVM->hwaccm.s.cbGuestPatchMem = 0;
- pVM->hwaccm.s.cPatches = 0;
- pVM->hwaccm.s.PatchTree = 0;
- pVM->hwaccm.s.fTPRPatchingActive = false;
- ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
-}
-
-
-/**
- * Callback to patch a TPR instruction (vmmcall or mov cr8).
- *
- * @returns VBox strict status code.
- * @param pVM Pointer to the VM.
- * @param pVCpu The VMCPU for the EMT we're being called on.
- * @param pvUser Unused.
- */
-DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
-{
- VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
-
- /* Only execute the handler on the VCPU the original patch request was issued. */
- if (pVCpu->idCpu != idCpu)
- return VINF_SUCCESS;
-
- Log(("hwaccmR3RemovePatches\n"));
- for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
- {
- uint8_t abInstr[15];
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
- RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
- int rc;
-
-#ifdef LOG_ENABLED
- char szOutput[256];
-
- rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
- szOutput, sizeof(szOutput), NULL);
- if (RT_SUCCESS(rc))
- Log(("Patched instr: %s\n", szOutput));
-#endif
-
- /* Check if the instruction is still the same. */
- rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
- if (rc != VINF_SUCCESS)
- {
- Log(("Patched code removed? (rc=%Rrc0\n", rc));
- continue; /* swapped out or otherwise removed; skip it. */
- }
-
- if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
- {
- Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
- continue; /* skip it. */
- }
-
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
- AssertRC(rc);
-
-#ifdef LOG_ENABLED
- rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
- szOutput, sizeof(szOutput), NULL);
- if (RT_SUCCESS(rc))
- Log(("Original instr: %s\n", szOutput));
-#endif
- }
- pVM->hwaccm.s.cPatches = 0;
- pVM->hwaccm.s.PatchTree = 0;
- pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
- pVM->hwaccm.s.fTPRPatchingActive = false;
- return VINF_SUCCESS;
-}
-
-
-/**
- * Worker for enabling patching in a VT-x/AMD-V guest.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param idCpu VCPU to execute hwaccmR3RemovePatches on.
- * @param pPatchMem Patch memory range.
- * @param cbPatchMem Size of the memory range.
- */
-static int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
-{
- int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)idCpu);
- AssertRC(rc);
-
- pVM->hwaccm.s.pGuestPatchMem = pPatchMem;
- pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
- pVM->hwaccm.s.cbGuestPatchMem = cbPatchMem;
- return VINF_SUCCESS;
-}
-
-
-/**
- * Enable patching in a VT-x/AMD-V guest
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatchMem Patch memory range.
- * @param cbPatchMem Size of the memory range.
- */
-VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
-{
- VM_ASSERT_EMT(pVM);
- Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
- if (pVM->cCpus > 1)
- {
- /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
- int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
- (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
- AssertRC(rc);
- return rc;
- }
- return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
-}
-
-
-/**
- * Disable patching in a VT-x/AMD-V guest.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pPatchMem Patch memory range.
- * @param cbPatchMem Size of the memory range.
- */
-VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
-{
- Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
-
- Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
- Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
-
- /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
- int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM));
- AssertRC(rc);
-
- pVM->hwaccm.s.pGuestPatchMem = 0;
- pVM->hwaccm.s.pFreeGuestPatchMem = 0;
- pVM->hwaccm.s.cbGuestPatchMem = 0;
- pVM->hwaccm.s.fTPRPatchingActive = false;
- return VINF_SUCCESS;
-}
-
-
-/**
- * Callback to patch a TPR instruction (vmmcall or mov cr8).
- *
- * @returns VBox strict status code.
- * @param pVM Pointer to the VM.
- * @param pVCpu The VMCPU for the EMT we're being called on.
- * @param pvUser User specified CPU context.
- *
- */
-DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
-{
- /*
- * Only execute the handler on the VCPU the original patch request was
- * issued. (The other CPU(s) might not yet have switched to protected
- * mode, nor have the correct memory context.)
- */
- VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
- if (pVCpu->idCpu != idCpu)
- return VINF_SUCCESS;
-
- /*
- * We're racing other VCPUs here, so don't try patch the instruction twice
- * and make sure there is still room for our patch record.
- */
- PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
- if (pPatch)
- {
- Log(("hwaccmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
- return VINF_SUCCESS;
- }
- uint32_t const idx = pVM->hwaccm.s.cPatches;
- if (idx >= RT_ELEMENTS(pVM->hwaccm.s.aPatches))
- {
- Log(("hwaccmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
- return VINF_SUCCESS;
- }
- pPatch = &pVM->hwaccm.s.aPatches[idx];
-
- Log(("hwaccmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
-
- /*
- * Disassembler the instruction and get cracking.
- */
- DBGFR3DisasInstrCurrentLog(pVCpu, "hwaccmR3ReplaceTprInstr");
- PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
- uint32_t cbOp;
- int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
- AssertRC(rc);
- if ( rc == VINF_SUCCESS
- && pDis->pCurInstr->uOpcode == OP_MOV
- && cbOp >= 3)
- {
- static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
-
- rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
- AssertRC(rc);
-
- pPatch->cbOp = cbOp;
-
- if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
- {
- /* write. */
- if (pDis->Param2.fUse == DISUSE_REG_GEN32)
- {
- pPatch->enmType = HWACCMTPRINSTR_WRITE_REG;
- pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
- Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
- }
- else
- {
- Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
- pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM;
- pPatch->uSrcOperand = pDis->Param2.uValue;
- Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
- }
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
- AssertRC(rc);
-
- memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
- pPatch->cbNewOp = sizeof(s_abVMMCall);
- }
- else
- {
- /*
- * TPR Read.
- *
- * Found:
- * mov eax, dword [fffe0080] (5 bytes)
- * Check if next instruction is:
- * shr eax, 4
- */
- Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
-
- uint8_t const idxMmioReg = pDis->Param1.Base.idxGenReg;
- uint8_t const cbOpMmio = cbOp;
- uint64_t const uSavedRip = pCtx->rip;
-
- pCtx->rip += cbOp;
- rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
- DBGFR3DisasInstrCurrentLog(pVCpu, "Following read");
- pCtx->rip = uSavedRip;
-
- if ( rc == VINF_SUCCESS
- && pDis->pCurInstr->uOpcode == OP_SHR
- && pDis->Param1.fUse == DISUSE_REG_GEN32
- && pDis->Param1.Base.idxGenReg == idxMmioReg
- && pDis->Param2.fUse == DISUSE_IMMEDIATE8
- && pDis->Param2.uValue == 4
- && cbOpMmio + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
- {
- uint8_t abInstr[15];
-
- /* Replacing two instructions now. */
- rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
- AssertRC(rc);
-
- pPatch->cbOp = cbOpMmio + cbOp;
-
- /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
- abInstr[0] = 0xF0;
- abInstr[1] = 0x0F;
- abInstr[2] = 0x20;
- abInstr[3] = 0xC0 | pDis->Param1.Base.idxGenReg;
- for (unsigned i = 4; i < pPatch->cbOp; i++)
- abInstr[i] = 0x90; /* nop */
-
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
- AssertRC(rc);
-
- memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
- pPatch->cbNewOp = pPatch->cbOp;
-
- Log(("Acceptable read/shr candidate!\n"));
- pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
- }
- else
- {
- pPatch->enmType = HWACCMTPRINSTR_READ;
- pPatch->uDstOperand = idxMmioReg;
-
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
- AssertRC(rc);
-
- memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
- pPatch->cbNewOp = sizeof(s_abVMMCall);
- Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_READ %u\n", pPatch->uDstOperand));
- }
- }
-
- pPatch->Core.Key = pCtx->eip;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
- AssertRC(rc);
-
- pVM->hwaccm.s.cPatches++;
- STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
- return VINF_SUCCESS;
- }
-
- /*
- * Save invalid patch, so we will not try again.
- */
- Log(("hwaccmR3ReplaceTprInstr: Failed to patch instr!\n"));
- pPatch->Core.Key = pCtx->eip;
- pPatch->enmType = HWACCMTPRINSTR_INVALID;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
- AssertRC(rc);
- pVM->hwaccm.s.cPatches++;
- STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
- return VINF_SUCCESS;
-}
-
-
-/**
- * Callback to patch a TPR instruction (jump to generated code).
- *
- * @returns VBox strict status code.
- * @param pVM Pointer to the VM.
- * @param pVCpu The VMCPU for the EMT we're being called on.
- * @param pvUser User specified CPU context.
- *
- */
-DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
-{
- /*
- * Only execute the handler on the VCPU the original patch request was
- * issued. (The other CPU(s) might not yet have switched to protected
- * mode, nor have the correct memory context.)
- */
- VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
- if (pVCpu->idCpu != idCpu)
- return VINF_SUCCESS;
-
- /*
- * We're racing other VCPUs here, so don't try patch the instruction twice
- * and make sure there is still room for our patch record.
- */
- PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
- if (pPatch)
- {
- Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
- return VINF_SUCCESS;
- }
- uint32_t const idx = pVM->hwaccm.s.cPatches;
- if (idx >= RT_ELEMENTS(pVM->hwaccm.s.aPatches))
- {
- Log(("hwaccmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
- return VINF_SUCCESS;
- }
- pPatch = &pVM->hwaccm.s.aPatches[idx];
-
- Log(("hwaccmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
- DBGFR3DisasInstrCurrentLog(pVCpu, "hwaccmR3PatchTprInstr");
-
- /*
- * Disassemble the instruction and get cracking.
- */
- PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
- uint32_t cbOp;
- int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
- AssertRC(rc);
- if ( rc == VINF_SUCCESS
- && pDis->pCurInstr->uOpcode == OP_MOV
- && cbOp >= 5)
- {
- uint8_t aPatch[64];
- uint32_t off = 0;
-
- rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
- AssertRC(rc);
-
- pPatch->cbOp = cbOp;
- pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
-
- if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
- {
- /*
- * TPR write:
- *
- * push ECX [51]
- * push EDX [52]
- * push EAX [50]
- * xor EDX,EDX [31 D2]
- * mov EAX,EAX [89 C0]
- * or
- * mov EAX,0000000CCh [B8 CC 00 00 00]
- * mov ECX,0C0000082h [B9 82 00 00 C0]
- * wrmsr [0F 30]
- * pop EAX [58]
- * pop EDX [5A]
- * pop ECX [59]
- * jmp return_address [E9 return_address]
- *
- */
- bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);
-
- aPatch[off++] = 0x51; /* push ecx */
- aPatch[off++] = 0x52; /* push edx */
- if (!fUsesEax)
- aPatch[off++] = 0x50; /* push eax */
- aPatch[off++] = 0x31; /* xor edx, edx */
- aPatch[off++] = 0xD2;
- if (pDis->Param2.fUse == DISUSE_REG_GEN32)
- {
- if (!fUsesEax)
- {
- aPatch[off++] = 0x89; /* mov eax, src_reg */
- aPatch[off++] = MAKE_MODRM(3, pDis->Param2.Base.idxGenReg, DISGREG_EAX);
- }
- }
- else
- {
- Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
- aPatch[off++] = 0xB8; /* mov eax, immediate */
- *(uint32_t *)&aPatch[off] = pDis->Param2.uValue;
- off += sizeof(uint32_t);
- }
- aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
- *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
- off += sizeof(uint32_t);
-
- aPatch[off++] = 0x0F; /* wrmsr */
- aPatch[off++] = 0x30;
- if (!fUsesEax)
- aPatch[off++] = 0x58; /* pop eax */
- aPatch[off++] = 0x5A; /* pop edx */
- aPatch[off++] = 0x59; /* pop ecx */
- }
- else
- {
- /*
- * TPR read:
- *
- * push ECX [51]
- * push EDX [52]
- * push EAX [50]
- * mov ECX,0C0000082h [B9 82 00 00 C0]
- * rdmsr [0F 32]
- * mov EAX,EAX [89 C0]
- * pop EAX [58]
- * pop EDX [5A]
- * pop ECX [59]
- * jmp return_address [E9 return_address]
- *
- */
- Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
-
- if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
- aPatch[off++] = 0x51; /* push ecx */
- if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
- aPatch[off++] = 0x52; /* push edx */
- if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
- aPatch[off++] = 0x50; /* push eax */
-
- aPatch[off++] = 0x31; /* xor edx, edx */
- aPatch[off++] = 0xD2;
-
- aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
- *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
- off += sizeof(uint32_t);
-
- aPatch[off++] = 0x0F; /* rdmsr */
- aPatch[off++] = 0x32;
-
- if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
- {
- aPatch[off++] = 0x89; /* mov dst_reg, eax */
- aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, pDis->Param1.Base.idxGenReg);
- }
-
- if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
- aPatch[off++] = 0x58; /* pop eax */
- if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
- aPatch[off++] = 0x5A; /* pop edx */
- if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
- aPatch[off++] = 0x59; /* pop ecx */
- }
- aPatch[off++] = 0xE9; /* jmp return_address */
- *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
- off += sizeof(RTRCUINTPTR);
-
- if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
- {
- /* Write new code to the patch buffer. */
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
- AssertRC(rc);
-
-#ifdef LOG_ENABLED
- uint32_t cbCurInstr;
- for (RTGCPTR GCPtrInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
- GCPtrInstr < pVM->hwaccm.s.pFreeGuestPatchMem + off;
- GCPtrInstr += RT_MAX(cbCurInstr, 1))
- {
- char szOutput[256];
- rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
- szOutput, sizeof(szOutput), &cbCurInstr);
- if (RT_SUCCESS(rc))
- Log(("Patch instr %s\n", szOutput));
- else
- Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
- }
-#endif
-
- pPatch->aNewOpcode[0] = 0xE9;
- *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
-
- /* Overwrite the TPR instruction with a jump. */
- rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
- AssertRC(rc);
-
- DBGFR3DisasInstrCurrentLog(pVCpu, "Jump");
-
- pVM->hwaccm.s.pFreeGuestPatchMem += off;
- pPatch->cbNewOp = 5;
-
- pPatch->Core.Key = pCtx->eip;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
- AssertRC(rc);
-
- pVM->hwaccm.s.cPatches++;
- pVM->hwaccm.s.fTPRPatchingActive = true;
- STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
- return VINF_SUCCESS;
- }
-
- Log(("Ran out of space in our patch buffer!\n"));
- }
- else
- Log(("hwaccmR3PatchTprInstr: Failed to patch instr!\n"));
-
-
- /*
- * Save invalid patch, so we will not try again.
- */
- pPatch = &pVM->hwaccm.s.aPatches[idx];
- pPatch->Core.Key = pCtx->eip;
- pPatch->enmType = HWACCMTPRINSTR_INVALID;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
- AssertRC(rc);
- pVM->hwaccm.s.cPatches++;
- STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
- return VINF_SUCCESS;
-}
-
-
-/**
- * Attempt to patch TPR mmio instructions.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pVCpu Pointer to the VMCPU.
- * @param pCtx Pointer to the guest CPU context.
- */
-VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
- NOREF(pCtx);
- int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
- pVM->hwaccm.s.pGuestPatchMem ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr,
- (void *)(uintptr_t)pVCpu->idCpu);
- AssertRC(rc);
- return rc;
-}
-
-
-/**
- * Force execution of the current IO code in the recompiler.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pCtx Partial VM execution context.
- */
-VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
-{
- PVMCPU pVCpu = VMMGetCpu(pVM);
-
- Assert(pVM->fHWACCMEnabled);
- Log(("HWACCMR3EmulateIoBlock\n"));
-
- /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
- if (HWACCMCanEmulateIoBlockEx(pCtx))
- {
- Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
- pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
- pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
- pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
- return VINF_EM_RESCHEDULE_REM;
- }
- return VINF_SUCCESS;
-}
-
-
-/**
- * Checks if we can currently use hardware accelerated raw mode.
- *
- * @returns true if we can currently use hardware acceleration, otherwise false.
- * @param pVM Pointer to the VM.
- * @param pCtx Partial VM execution context.
- */
-VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
-{
- PVMCPU pVCpu = VMMGetCpu(pVM);
-
- Assert(pVM->fHWACCMEnabled);
-
- /* If we're still executing the IO code, then return false. */
- if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
- && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
- && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
- && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
- return false;
-
- pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
-
- /* AMD-V supports real & protected mode with or without paging. */
- if (pVM->hwaccm.s.svm.fEnabled)
- {
- pVCpu->hwaccm.s.fActive = true;
- return true;
- }
-
- pVCpu->hwaccm.s.fActive = false;
-
- /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
- Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));
-
- bool fSupportsRealMode = pVM->hwaccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
- if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
- {
- /*
- * The VMM device heap is a requirement for emulating real mode or protected mode without paging with the unrestricted
- * guest execution feature i missing (VT-x only).
- */
- if (fSupportsRealMode)
- {
- if (CPUMIsGuestInRealModeEx(pCtx))
- {
- /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
- * bases and limits, i.e. limit must be 64K and base must be selector * 16.
- * If this is not true, we cannot execute real mode as V86 and have to fall
- * back to emulation.
- */
- if ( pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
- || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
- || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
- || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
- || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
- || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4)
- || (pCtx->cs.u32Limit != 0xffff)
- || (pCtx->ds.u32Limit != 0xffff)
- || (pCtx->es.u32Limit != 0xffff)
- || (pCtx->ss.u32Limit != 0xffff)
- || (pCtx->fs.u32Limit != 0xffff)
- || (pCtx->gs.u32Limit != 0xffff))
- {
- return false;
- }
- }
- else
- {
- PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
- /* Verify the requirements for executing code in protected
- mode. VT-x can't handle the CPU state right after a switch
- from real to protected mode. (all sorts of RPL & DPL assumptions) */
- if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
- && enmGuestMode >= PGMMODE_PROTECTED)
- {
- if ( (pCtx->cs.Sel & X86_SEL_RPL)
- || (pCtx->ds.Sel & X86_SEL_RPL)
- || (pCtx->es.Sel & X86_SEL_RPL)
- || (pCtx->fs.Sel & X86_SEL_RPL)
- || (pCtx->gs.Sel & X86_SEL_RPL)
- || (pCtx->ss.Sel & X86_SEL_RPL))
- {
- return false;
- }
- }
- /* VT-x also chokes on invalid tr or ldtr selectors (minix) */
- if ( pCtx->gdtr.cbGdt
- && ( pCtx->tr.Sel > pCtx->gdtr.cbGdt
- || pCtx->ldtr.Sel > pCtx->gdtr.cbGdt))
- {
- return false;
- }
- }
- }
- else
- {
- if ( !CPUMIsGuestInLongModeEx(pCtx)
- && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
- {
- /** @todo This should (probably) be set on every excursion to the REM,
- * however it's too risky right now. So, only apply it when we go
- * back to REM for real mode execution. (The XP hack below doesn't
- * work reliably without this.)
- * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
- pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
-
- if ( !pVM->hwaccm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
- || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
- return false;
-
- /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
- if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
- return false;
-
- /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
- /* Windows XP; switch to protected mode; all selectors are marked not present in the
- * hidden registers (possible recompiler bug; see load_seg_vm) */
- if (pCtx->cs.Attr.n.u1Present == 0)
- return false;
- if (pCtx->ss.Attr.n.u1Present == 0)
- return false;
-
- /* Windows XP: possible same as above, but new recompiler requires new heuristics?
- VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
- /** @todo This check is actually wrong, it doesn't take the direction of the
- * stack segment into account. But, it does the job for now. */
- if (pCtx->rsp >= pCtx->ss.u32Limit)
- return false;
-#if 0
- if ( pCtx->cs.Sel >= pCtx->gdtr.cbGdt
- || pCtx->ss.Sel >= pCtx->gdtr.cbGdt
- || pCtx->ds.Sel >= pCtx->gdtr.cbGdt
- || pCtx->es.Sel >= pCtx->gdtr.cbGdt
- || pCtx->fs.Sel >= pCtx->gdtr.cbGdt
- || pCtx->gs.Sel >= pCtx->gdtr.cbGdt)
- return false;
-#endif
- }
- }
- }
-
- if (pVM->hwaccm.s.vmx.fEnabled)
- {
- uint32_t mask;
-
- /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
- mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
- /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
- mask &= ~X86_CR0_NE;
-
- if (fSupportsRealMode)
- {
- /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
- mask &= ~(X86_CR0_PG|X86_CR0_PE);
- }
- else
- {
- /* We support protected mode without paging using identity mapping. */
- mask &= ~X86_CR0_PG;
- }
- if ((pCtx->cr0 & mask) != mask)
- return false;
-
- /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
- mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
- if ((pCtx->cr0 & mask) != 0)
- return false;
-
- /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
- mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
- mask &= ~X86_CR4_VMXE;
- if ((pCtx->cr4 & mask) != mask)
- return false;
-
- /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
- mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
- if ((pCtx->cr4 & mask) != 0)
- return false;
-
- pVCpu->hwaccm.s.fActive = true;
- return true;
- }
-
- return false;
-}
-
-
-/**
- * Checks if we need to reschedule due to VMM device heap changes.
- *
- * @returns true if a reschedule is required, otherwise false.
- * @param pVM Pointer to the VM.
- * @param pCtx VM execution context.
- */
-VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
-{
- /*
- * The VMM device heap is a requirement for emulating real mode or protected mode without paging
- * when the unrestricted guest execution feature is missing (VT-x only).
- */
- if ( pVM->hwaccm.s.vmx.fEnabled
- && !pVM->hwaccm.s.vmx.fUnrestrictedGuest
- && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
- && !PDMVMMDevHeapIsEnabled(pVM)
- && (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
- return true;
-
- return false;
-}
-
-
-/**
- * Notification from EM about a rescheduling into hardware assisted execution
- * mode.
- *
- * @param pVCpu Pointer to the current VMCPU.
- */
-VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
-{
- pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
-}
-
-
-/**
- * Notification from EM about returning from instruction emulation (REM / EM).
- *
- * @param pVCpu Pointer to the VMCPU.
- */
-VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
-{
- pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
-}
-
-
-/**
- * Checks if we are currently using hardware accelerated raw mode.
- *
- * @returns true if hardware acceleration is being used, otherwise false.
- * @param pVCpu Pointer to the VMCPU.
- */
-VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
-{
- return pVCpu->hwaccm.s.fActive;
-}
-
-
-/**
- * Checks if we are currently using nested paging.
- *
- * @returns true if nested paging is being used, otherwise false.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
-{
- return pVM->hwaccm.s.fNestedPaging;
-}
-
-
-/**
- * Checks if we are currently using VPID in VT-x mode.
- *
- * @returns true if VPID is being used, otherwise false.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
-{
- return pVM->hwaccm.s.vmx.fVPID;
-}
-
-
-/**
- * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
- *
- * @returns true if an internal event is pending, otherwise false.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
-{
- return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
-}
-
-
-/**
- * Checks if the VMX-preemption timer is being used.
- *
- * @returns true if the VMX-preemption timer is being used, otherwise false.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(bool) HWACCMR3IsVmxPreemptionTimerUsed(PVM pVM)
-{
- return HWACCMIsEnabled(pVM)
- && pVM->hwaccm.s.vmx.fEnabled
- && pVM->hwaccm.s.vmx.fUsePreemptTimer;
-}
-
-
-/**
- * Restart an I/O instruction that was refused in ring-0
- *
- * @returns Strict VBox status code. Informational status codes other than the one documented
- * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
- * @retval VINF_SUCCESS Success.
- * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
- * status code must be passed on to EM.
- * @retval VERR_NOT_FOUND if no pending I/O instruction.
- *
- * @param pVM Pointer to the VM.
- * @param pVCpu Pointer to the VMCPU.
- * @param pCtx Pointer to the guest CPU context.
- */
-VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
- HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
-
- pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
-
- if ( pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
- || enmType == HWACCMPENDINGIO_INVALID)
- return VERR_NOT_FOUND;
-
- VBOXSTRICTRC rcStrict;
- switch (enmType)
- {
- case HWACCMPENDINGIO_PORT_READ:
- {
- uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
- uint32_t u32Val = 0;
-
- rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
- &u32Val,
- pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
- if (IOM_SUCCESS(rcStrict))
- {
- /* Write back to the EAX register. */
- pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
- pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
- }
- break;
- }
-
- case HWACCMPENDINGIO_PORT_WRITE:
- rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
- pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
- pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
- if (IOM_SUCCESS(rcStrict))
- pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
- break;
-
- default:
- AssertLogRelFailedReturn(VERR_HM_UNKNOWN_IO_INSTRUCTION);
- }
-
- return rcStrict;
-}
-
-
-/**
- * Inject an NMI into a running VM (only VCPU 0!)
- *
- * @returns boolean
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
-{
- VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
- return VINF_SUCCESS;
-}
-
-
-/**
- * Check fatal VT-x/AMD-V error and produce some meaningful
- * log release message.
- *
- * @param pVM Pointer to the VM.
- * @param iStatusCode VBox status code.
- */
-VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
-{
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- switch (iStatusCode)
- {
- case VERR_VMX_INVALID_VMCS_FIELD:
- break;
-
- case VERR_VMX_INVALID_VMCS_PTR:
- LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
- LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
- LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
- LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
- break;
-
- case VERR_VMX_UNABLE_TO_START_VM:
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
- if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
- {
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
-#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pGuestMSRPhys));
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pHostMSRPhys));
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.cCachedMSRs));
-#endif
- }
- /** @todo Log VM-entry event injection control fields
- * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
- * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
- break;
-
- case VERR_VMX_UNABLE_TO_RESUME_VM:
- LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
- LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
- break;
-
- case VERR_VMX_INVALID_VMXON_PTR:
- break;
- }
- }
-
- if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
- {
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %x\n", pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1));
- LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0));
- }
-}
-
-
-/**
- * Execute state save operation.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pSSM SSM operation handle.
- */
-static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
-{
- int rc;
-
- Log(("hwaccmR3Save:\n"));
-
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- /*
- * Save the basic bits - fortunately all the other things can be resynced on load.
- */
- rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
- AssertRCReturn(rc, rc);
- }
-#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
- rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
- AssertRCReturn(rc, rc);
- rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
- AssertRCReturn(rc, rc);
-
- /* Store all the guest patch records too. */
- rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
- AssertRCReturn(rc, rc);
-
- for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
- {
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
-
- rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->cbOp);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
- AssertRCReturn(rc, rc);
-
- AssertCompileSize(HWACCMTPRINSTR, 4);
- rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3PutU32(pSSM, pPatch->cFaults);
- AssertRCReturn(rc, rc);
- }
-#endif
- return VINF_SUCCESS;
-}
-
-
-/**
- * Execute state load operation.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pSSM SSM operation handle.
- * @param uVersion Data layout version.
- * @param uPass The data pass.
- */
-static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
-{
- int rc;
-
- Log(("hwaccmR3Load:\n"));
- Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
-
- /*
- * Validate version.
- */
- if ( uVersion != HWACCM_SSM_VERSION
- && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
- && uVersion != HWACCM_SSM_VERSION_2_0_X)
- {
- AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
- return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
- }
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
- AssertRCReturn(rc, rc);
- rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
- AssertRCReturn(rc, rc);
- rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
- AssertRCReturn(rc, rc);
-
- if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
- {
- uint32_t val;
-
- rc = SSMR3GetU32(pSSM, &val);
- AssertRCReturn(rc, rc);
- pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
-
- rc = SSMR3GetU32(pSSM, &val);
- AssertRCReturn(rc, rc);
- pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
-
- rc = SSMR3GetU32(pSSM, &val);
- AssertRCReturn(rc, rc);
- pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
- }
- }
-#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
- if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
- {
- rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
- AssertRCReturn(rc, rc);
- rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
- AssertRCReturn(rc, rc);
- rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
- AssertRCReturn(rc, rc);
-
- /* Fetch all TPR patch records. */
- rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
- AssertRCReturn(rc, rc);
-
- for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
- {
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
-
- rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
- AssertRCReturn(rc, rc);
-
- if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
- pVM->hwaccm.s.fTPRPatchingActive = true;
-
- Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);
-
- rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
- AssertRCReturn(rc, rc);
-
- rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
- AssertRCReturn(rc, rc);
-
- Log(("hwaccmR3Load: patch %d\n", i));
- Log(("Key = %x\n", pPatch->Core.Key));
- Log(("cbOp = %d\n", pPatch->cbOp));
- Log(("cbNewOp = %d\n", pPatch->cbNewOp));
- Log(("type = %d\n", pPatch->enmType));
- Log(("srcop = %d\n", pPatch->uSrcOperand));
- Log(("dstop = %d\n", pPatch->uDstOperand));
- Log(("cFaults = %d\n", pPatch->cFaults));
- Log(("target = %x\n", pPatch->pJumpTarget));
- rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
- AssertRC(rc);
- }
- }
-#endif
-
- /* Recheck all VCPUs if we can go straight into hwaccm execution mode. */
- if (HWACCMIsEnabled(pVM))
- {
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
- {
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
- }
- }
- return VINF_SUCCESS;
-}
-
diff --git a/src/VBox/VMM/VMMR3/IEMR3.cpp b/src/VBox/VMM/VMMR3/IEMR3.cpp
index e383e06f..0c08343f 100644
--- a/src/VBox/VMM/VMMR3/IEMR3.cpp
+++ b/src/VBox/VMM/VMMR3/IEMR3.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2011 Oracle Corporation
+ * Copyright (C) 2011-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -20,13 +20,25 @@
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/iem.h>
+#include <VBox/vmm/cpum.h>
#include "IEMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
+#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
+
+/**
+ * Initializes the interpreted execution manager.
+ *
+ * This must be called after CPUM as we're quering information from CPUM about
+ * the guest and host CPUs.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ */
VMMR3DECL(int) IEMR3Init(PVM pVM)
{
for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
@@ -38,20 +50,50 @@ VMMR3DECL(int) IEMR3Init(PVM pVM)
pVCpu->iem.s.pCtxR0 = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
"Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
- "Potential exists", "/IEM/CPU%u/cPotentialExits", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
"VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
"VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
"Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
"Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
- STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
+ STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
"Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
+
+ /*
+ * Host and guest CPU information.
+ */
+ if (idCpu == 0)
+ {
+ uint32_t uIgnored;
+ CPUMGetGuestCpuId(pVCpu, 1, &uIgnored, &uIgnored,
+ &pVCpu->iem.s.fCpuIdStdFeaturesEcx, &pVCpu->iem.s.fCpuIdStdFeaturesEdx);
+ pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
+
+ ASMCpuId_ECX_EDX(1, &pVCpu->iem.s.fHostCpuIdStdFeaturesEcx, &pVCpu->iem.s.fHostCpuIdStdFeaturesEdx);
+ pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
+ }
+ else
+ {
+ pVCpu->iem.s.fCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEcx;
+ pVCpu->iem.s.fCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEdx;
+ pVCpu->iem.s.enmCpuVendor = pVM->aCpus[0].iem.s.enmCpuVendor;
+ pVCpu->iem.s.fHostCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEcx;
+ pVCpu->iem.s.fHostCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEdx;
+ pVCpu->iem.s.enmHostCpuVendor = pVM->aCpus[0].iem.s.enmHostCpuVendor;
+ }
+
+ /*
+ * Mark all buffers free.
+ */
+ uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
+ while (iMemMap-- > 0)
+ pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
}
return VINF_SUCCESS;
}
diff --git a/src/VBox/VMM/VMMR3/IOM.cpp b/src/VBox/VMM/VMMR3/IOM.cpp
index 6654fda8..667ce9f7 100644
--- a/src/VBox/VMM/VMMR3/IOM.cpp
+++ b/src/VBox/VMM/VMMR3/IOM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -52,7 +52,7 @@
* execution.
*
*
- * @section sec_iom_hwaccm Hardware Assisted Virtualization Mode
+ * @section sec_iom_hm Hardware Assisted Virtualization Mode
*
* When running in hardware assisted virtualization mode we'll be doing much the
* same things as in raw-mode. The main difference is that we're running in the
@@ -88,6 +88,13 @@
*
*/
+/** @todo MMIO - simplifying the device end.
+ * - Add a return status for doing DBGFSTOP on access where there are no known
+ * registers.
+ * -
+ *
+ * */
+
/*******************************************************************************
* Header Files *
@@ -97,6 +104,7 @@
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/sup.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
@@ -158,7 +166,11 @@ VMMR3_INT_DECL(int) IOMR3Init(PVM pVM)
/*
* Initialize the REM critical section.
*/
+#ifdef IOM_WITH_CRIT_SECT_RW
+ int rc = PDMR3CritSectRwInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
+#else
int rc = PDMR3CritSectInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
+#endif
AssertRCReturn(rc, rc);
/*
@@ -226,33 +238,41 @@ VMMR3_INT_DECL(int) IOMR3Init(PVM pVM)
*/
static void iomR3FlushCache(PVM pVM)
{
- IOM_LOCK(pVM);
-
/*
- * Caching of port and statistics (saves some time in rep outs/ins instruction emulation)
+ * Since all relevant (1) cache use requires at least read access to the
+ * critical section, we can exclude all other EMTs by grabbing exclusive
+ * access to the critical section and then safely update the caches of
+ * other EMTs.
+ * (1) The irrelvant access not holding the lock is in assertion code.
*/
- pVM->iom.s.pRangeLastReadR0 = NIL_RTR0PTR;
- pVM->iom.s.pRangeLastWriteR0 = NIL_RTR0PTR;
- pVM->iom.s.pStatsLastReadR0 = NIL_RTR0PTR;
- pVM->iom.s.pStatsLastWriteR0 = NIL_RTR0PTR;
- pVM->iom.s.pMMIORangeLastR0 = NIL_RTR0PTR;
- pVM->iom.s.pMMIOStatsLastR0 = NIL_RTR0PTR;
-
- pVM->iom.s.pRangeLastReadR3 = NULL;
- pVM->iom.s.pRangeLastWriteR3 = NULL;
- pVM->iom.s.pStatsLastReadR3 = NULL;
- pVM->iom.s.pStatsLastWriteR3 = NULL;
- pVM->iom.s.pMMIORangeLastR3 = NULL;
- pVM->iom.s.pMMIOStatsLastR3 = NULL;
-
- pVM->iom.s.pRangeLastReadRC = NIL_RTRCPTR;
- pVM->iom.s.pRangeLastWriteRC = NIL_RTRCPTR;
- pVM->iom.s.pStatsLastReadRC = NIL_RTRCPTR;
- pVM->iom.s.pStatsLastWriteRC = NIL_RTRCPTR;
- pVM->iom.s.pMMIORangeLastRC = NIL_RTRCPTR;
- pVM->iom.s.pMMIOStatsLastRC = NIL_RTRCPTR;
-
- IOM_UNLOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
+ VMCPUID iCpu = pVM->cCpus;
+ while (iCpu-- > 0)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[iCpu];
+ pVCpu->iom.s.pRangeLastReadR0 = NIL_RTR0PTR;
+ pVCpu->iom.s.pRangeLastWriteR0 = NIL_RTR0PTR;
+ pVCpu->iom.s.pStatsLastReadR0 = NIL_RTR0PTR;
+ pVCpu->iom.s.pStatsLastWriteR0 = NIL_RTR0PTR;
+ pVCpu->iom.s.pMMIORangeLastR0 = NIL_RTR0PTR;
+ pVCpu->iom.s.pMMIOStatsLastR0 = NIL_RTR0PTR;
+
+ pVCpu->iom.s.pRangeLastReadR3 = NULL;
+ pVCpu->iom.s.pRangeLastWriteR3 = NULL;
+ pVCpu->iom.s.pStatsLastReadR3 = NULL;
+ pVCpu->iom.s.pStatsLastWriteR3 = NULL;
+ pVCpu->iom.s.pMMIORangeLastR3 = NULL;
+ pVCpu->iom.s.pMMIOStatsLastR3 = NULL;
+
+ pVCpu->iom.s.pRangeLastReadRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pRangeLastWriteRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pStatsLastReadRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pStatsLastWriteRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pMMIORangeLastRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pMMIOStatsLastRC = NIL_RTRCPTR;
+ }
+
+ IOM_UNLOCK_EXCL(pVM);
}
@@ -288,24 +308,23 @@ VMMR3_INT_DECL(void) IOMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
RTAvlroIOPortDoWithAll(&pVM->iom.s.pTreesR3->IOPortTreeRC, true, iomR3RelocateIOPortCallback, &offDelta);
RTAvlroGCPhysDoWithAll(&pVM->iom.s.pTreesR3->MMIOTree, true, iomR3RelocateMMIOCallback, &offDelta);
- if (pVM->iom.s.pfnMMIOHandlerRC)
+ if (pVM->iom.s.pfnMMIOHandlerRC != NIL_RTRCPTR)
pVM->iom.s.pfnMMIOHandlerRC += offDelta;
/*
- * Apply relocations to the cached GC handlers
+ * Reset the raw-mode cache (don't bother relocating it).
*/
- if (pVM->iom.s.pRangeLastReadRC)
- pVM->iom.s.pRangeLastReadRC += offDelta;
- if (pVM->iom.s.pRangeLastWriteRC)
- pVM->iom.s.pRangeLastWriteRC += offDelta;
- if (pVM->iom.s.pStatsLastReadRC)
- pVM->iom.s.pStatsLastReadRC += offDelta;
- if (pVM->iom.s.pStatsLastWriteRC)
- pVM->iom.s.pStatsLastWriteRC += offDelta;
- if (pVM->iom.s.pMMIORangeLastRC)
- pVM->iom.s.pMMIORangeLastRC += offDelta;
- if (pVM->iom.s.pMMIOStatsLastRC)
- pVM->iom.s.pMMIOStatsLastRC += offDelta;
+ VMCPUID iCpu = pVM->cCpus;
+ while (iCpu-- > 0)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[iCpu];
+ pVCpu->iom.s.pRangeLastReadRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pRangeLastWriteRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pStatsLastReadRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pStatsLastWriteRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pMMIORangeLastRC = NIL_RTRCPTR;
+ pVCpu->iom.s.pMMIOStatsLastRC = NIL_RTRCPTR;
+ }
}
@@ -396,13 +415,17 @@ VMMR3_INT_DECL(int) IOMR3Term(PVM pVM)
* @param Port Port.
* @param pszDesc Description.
*/
-PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDesc)
+static PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDesc)
{
- Assert(IOMIsLockOwner(pVM));
+ IOM_LOCK_EXCL(pVM);
+
/* check if it already exists. */
PIOMIOPORTSTATS pPort = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.pTreesR3->IOPortStatTree, Port);
if (pPort)
+ {
+ IOM_UNLOCK_EXCL(pVM);
return pPort;
+ }
/* allocate stats node. */
int rc = MMHyperAlloc(pVM, sizeof(*pPort), 0, MM_TAG_IOM_STATS, (void **)&pPort);
@@ -413,6 +436,8 @@ PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDe
pPort->Core.Key = Port;
if (RTAvloIOPortInsert(&pVM->iom.s.pTreesR3->IOPortStatTree, &pPort->Core))
{
+ IOM_UNLOCK_EXCL(pVM);
+
/* put a name on common ports. */
if (!pszDesc)
pszDesc = iomR3IOPortGetStandardName(Port);
@@ -433,9 +458,11 @@ PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDe
return pPort;
}
+
AssertMsgFailed(("what! Port=%d\n", Port));
MMHyperFree(pVM, pPort);
}
+ IOM_UNLOCK_EXCL(pVM);
return NULL;
}
@@ -451,14 +478,15 @@ PIOMIOPORTSTATS iomR3IOPortStatsCreate(PVM pVM, RTIOPORT Port, const char *pszDe
*/
PIOMMMIOSTATS iomR3MMIOStatsCreate(PVM pVM, RTGCPHYS GCPhys, const char *pszDesc)
{
- Assert(IOMIsLockOwner(pVM));
-#ifdef DEBUG_sandervl
- AssertGCPhys32(GCPhys);
-#endif
+ IOM_LOCK_EXCL(pVM);
+
/* check if it already exists. */
PIOMMMIOSTATS pStats = (PIOMMMIOSTATS)RTAvloGCPhysGet(&pVM->iom.s.pTreesR3->MmioStatTree, GCPhys);
if (pStats)
+ {
+ IOM_UNLOCK_EXCL(pVM);
return pStats;
+ }
/* allocate stats node. */
int rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_IOM_STATS, (void **)&pStats);
@@ -469,6 +497,8 @@ PIOMMMIOSTATS iomR3MMIOStatsCreate(PVM pVM, RTGCPHYS GCPhys, const char *pszDesc
pStats->Core.Key = GCPhys;
if (RTAvloGCPhysInsert(&pVM->iom.s.pTreesR3->MmioStatTree, &pStats->Core))
{
+ IOM_UNLOCK_EXCL(pVM);
+
rc = STAMR3RegisterF(pVM, &pStats->Accesses, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, pszDesc, "/IOM/MMIO/%RGp", GCPhys); AssertRC(rc);
rc = STAMR3RegisterF(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Read-R3", GCPhys); AssertRC(rc);
rc = STAMR3RegisterF(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Write-R3", GCPhys); AssertRC(rc);
@@ -482,6 +512,7 @@ PIOMMMIOSTATS iomR3MMIOStatsCreate(PVM pVM, RTGCPHYS GCPhys, const char *pszDesc
AssertMsgFailed(("what! GCPhys=%RGp\n", GCPhys));
MMHyperFree(pVM, pStats);
}
+ IOM_UNLOCK_EXCL(pVM);
return NULL;
}
@@ -563,20 +594,20 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
/*
* Try Insert it.
*/
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
if (RTAvlroIOPortInsert(&pVM->iom.s.pTreesR3->IOPortTreeR3, &pRange->Core))
{
#ifdef VBOX_WITH_STATISTICS
for (unsigned iPort = 0; iPort < cPorts; iPort++)
iomR3IOPortStatsCreate(pVM, PortStart + iPort, pszDesc);
#endif
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
/* conflict. */
- DBGFR3Info(pVM, "ioport", NULL, NULL);
+ DBGFR3Info(pVM->pUVM, "ioport", NULL, NULL);
AssertMsgFailed(("Port range %#x-%#x (%s) conflicts with existing range(s)!\n", PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
MMHyperFree(pVM, pRange);
rc = VERR_IOM_IOPORT_RANGE_CONFLICT;
@@ -612,6 +643,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
{
LogFlow(("IOMR3IOPortRegisterRC: pDevIns=%p PortStart=%#x cPorts=%#x pvUser=%RRv pfnOutCallback=%RRv pfnInCallback=%RRv pfnOutStrCallback=%RRv pfnInStrCallback=%RRv pszDesc=%s\n",
pDevIns, PortStart, cPorts, pvUser, pfnOutCallback, pfnInCallback, pfnOutStrCallback, pfnInStrCallback, pszDesc));
+ AssertReturn(!HMIsEnabled(pVM), VERR_IOM_HM_IPE);
/*
* Validate input.
@@ -629,7 +661,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
return VERR_INVALID_PARAMETER;
}
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
/*
* Validate that there are ring-3 ranges for the ports.
@@ -641,7 +673,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
if (!pRange)
{
AssertMsgFailed(("No R3! Port=#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_NO_R3_IOPORT_RANGE;
}
#ifndef IOM_NO_PDMINS_CHECKS
@@ -652,7 +684,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
# endif
{
AssertMsgFailed(("Not owner! Port=%#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_NOT_IOPORT_RANGE_OWNER;
}
#endif
@@ -686,7 +718,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
*/
if (RTAvlroIOPortInsert(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortTreeRC, &pRange->Core))
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
@@ -695,7 +727,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
MMHyperFree(pVM, pRange);
rc = VERR_IOM_IOPORT_RANGE_CONFLICT;
}
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc;
}
@@ -744,7 +776,8 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
return VERR_INVALID_PARAMETER;
}
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
+
/*
* Validate that there are ring-3 ranges for the ports.
*/
@@ -755,7 +788,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
if (!pRange)
{
AssertMsgFailed(("No R3! Port=#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_NO_R3_IOPORT_RANGE;
}
#ifndef IOM_NO_PDMINS_CHECKS
@@ -766,7 +799,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
# endif
{
AssertMsgFailed(("Not owner! Port=%#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_NOT_IOPORT_RANGE_OWNER;
}
#endif
@@ -800,7 +833,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
*/
if (RTAvlroIOPortInsert(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortTreeR0, &pRange->Core))
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
@@ -809,7 +842,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
MMHyperFree(pVM, pRange);
rc = VERR_IOM_IOPORT_RANGE_CONFLICT;
}
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc;
}
@@ -847,7 +880,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
return VERR_IOM_INVALID_IOPORT_RANGE;
}
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
/* Flush the IO port lookup cache */
iomR3FlushCache(pVM);
@@ -868,7 +901,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
{
AssertMsgFailed(("Removal of ports in range %#x-%#x rejected because not owner of %#x-%#x (%s)\n",
PortStart, PortLast, pRange->Core.Key, pRange->Core.KeyLast, pRange->pszDesc));
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_NOT_IOPORT_RANGE_OWNER;
}
#endif /* !IOM_NO_PDMINS_CHECKS */
@@ -932,7 +965,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
int rc2 = MMHyperAlloc(pVM, sizeof(*pRangeNew), 0, MM_TAG_IOM, (void **)&pRangeNew);
if (RT_FAILURE(rc2))
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc2;
}
*pRangeNew = *pRange;
@@ -1015,7 +1048,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
int rc2 = MMHyperAlloc(pVM, sizeof(*pRangeNew), 0, MM_TAG_IOM, (void **)&pRangeNew);
if (RT_FAILURE(rc2))
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc2;
}
*pRangeNew = *pRange;
@@ -1097,7 +1130,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
int rc2 = MMHyperAlloc(pVM, sizeof(*pRangeNew), 0, MM_TAG_IOM, (void **)&pRangeNew);
if (RT_FAILURE(rc2))
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc2;
}
*pRangeNew = *pRange;
@@ -1126,7 +1159,7 @@ VMMR3_INT_DECL(int) IOMR3IOPortDeregister(PVM pVM, PPDMDEVINS pDevIns, RTIOPORT
} /* for all ports - ring-3. */
/* done */
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return rc;
}
@@ -1303,84 +1336,6 @@ static DECLCALLBACK(void) iomR3IOPortInfo(PVM pVM, PCDBGFINFOHLP pHlp, const cha
sizeof(RTRCPTR) * 2, "Out ",
sizeof(RTRCPTR) * 2, "pvUser ");
RTAvlroIOPortDoWithAll(&pVM->iom.s.pTreesR3->IOPortTreeRC, true, iomR3IOPortInfoOneRC, (void *)pHlp);
-
- if (pVM->iom.s.pRangeLastReadRC)
- {
- PIOMIOPORTRANGERC pRange = (PIOMIOPORTRANGERC)MMHyperRCToCC(pVM, pVM->iom.s.pRangeLastReadRC);
- pHlp->pfnPrintf(pHlp, "RC Read Ports: %#04x-%#04x %RRv %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pVM->iom.s.pRangeLastReadRC, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastReadRC)
- {
- PIOMIOPORTSTATS pRange = (PIOMIOPORTSTATS)MMHyperRCToCC(pVM, pVM->iom.s.pStatsLastReadRC);
- pHlp->pfnPrintf(pHlp, "RC Read Stats: %#04x %RRv\n",
- pRange->Core.Key, pVM->iom.s.pStatsLastReadRC);
- }
-
- if (pVM->iom.s.pRangeLastWriteRC)
- {
- PIOMIOPORTRANGERC pRange = (PIOMIOPORTRANGERC)MMHyperRCToCC(pVM, pVM->iom.s.pRangeLastWriteRC);
- pHlp->pfnPrintf(pHlp, "RC Write Ports: %#04x-%#04x %RRv %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pVM->iom.s.pRangeLastWriteRC, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastWriteRC)
- {
- PIOMIOPORTSTATS pRange = (PIOMIOPORTSTATS)MMHyperRCToCC(pVM, pVM->iom.s.pStatsLastWriteRC);
- pHlp->pfnPrintf(pHlp, "RC Write Stats: %#04x %RRv\n",
- pRange->Core.Key, pVM->iom.s.pStatsLastWriteRC);
- }
-
- if (pVM->iom.s.pRangeLastReadR3)
- {
- PIOMIOPORTRANGER3 pRange = pVM->iom.s.pRangeLastReadR3;
- pHlp->pfnPrintf(pHlp, "R3 Read Ports: %#04x-%#04x %p %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pRange, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastReadR3)
- {
- PIOMIOPORTSTATS pRange = pVM->iom.s.pStatsLastReadR3;
- pHlp->pfnPrintf(pHlp, "R3 Read Stats: %#04x %p\n",
- pRange->Core.Key, pRange);
- }
-
- if (pVM->iom.s.pRangeLastWriteR3)
- {
- PIOMIOPORTRANGER3 pRange = pVM->iom.s.pRangeLastWriteR3;
- pHlp->pfnPrintf(pHlp, "R3 Write Ports: %#04x-%#04x %p %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pRange, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastWriteR3)
- {
- PIOMIOPORTSTATS pRange = pVM->iom.s.pStatsLastWriteR3;
- pHlp->pfnPrintf(pHlp, "R3 Write Stats: %#04x %p\n",
- pRange->Core.Key, pRange);
- }
-
- if (pVM->iom.s.pRangeLastReadR0)
- {
- PIOMIOPORTRANGER0 pRange = (PIOMIOPORTRANGER0)MMHyperR0ToCC(pVM, pVM->iom.s.pRangeLastReadR0);
- pHlp->pfnPrintf(pHlp, "R0 Read Ports: %#04x-%#04x %p %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pRange, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastReadR0)
- {
- PIOMIOPORTSTATS pRange = (PIOMIOPORTSTATS)MMHyperR0ToCC(pVM, pVM->iom.s.pStatsLastReadR0);
- pHlp->pfnPrintf(pHlp, "R0 Read Stats: %#04x %p\n",
- pRange->Core.Key, pRange);
- }
-
- if (pVM->iom.s.pRangeLastWriteR0)
- {
- PIOMIOPORTRANGER0 pRange = (PIOMIOPORTRANGER0)MMHyperR0ToCC(pVM, pVM->iom.s.pRangeLastWriteR0);
- pHlp->pfnPrintf(pHlp, "R0 Write Ports: %#04x-%#04x %p %s\n",
- pRange->Port, pRange->Port + pRange->cPorts, pRange, pRange->pszDesc);
- }
- if (pVM->iom.s.pStatsLastWriteR0)
- {
- PIOMIOPORTSTATS pRange = (PIOMIOPORTSTATS)MMHyperR0ToCC(pVM, pVM->iom.s.pStatsLastWriteR0);
- pHlp->pfnPrintf(pHlp, "R0 Write Stats: %#04x %p\n",
- pRange->Core.Key, pRange);
- }
}
@@ -1417,8 +1372,8 @@ IOMR3MmioRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
AssertMsgReturn(GCPhysStart + (cbRange - 1) >= GCPhysStart,("Wrapped! %RGp %#x bytes\n", GCPhysStart, cbRange),
VERR_IOM_INVALID_MMIO_RANGE);
AssertMsgReturn( !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
- || (fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_MODE
- || (fFlags & IOMMMIO_FLAGS_WRITE_MODE) > IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
+ && (fFlags & IOMMMIO_FLAGS_READ_MODE) <= IOMMMIO_FLAGS_READ_DWORD_QWORD
+ && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
("%#x\n", fFlags),
VERR_INVALID_PARAMETER);
@@ -1427,8 +1382,11 @@ IOMR3MmioRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
*/
if (pVM->iom.s.pfnMMIOHandlerR0 == NIL_RTR0PTR)
{
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "IOMMMIOHandler", &pVM->iom.s.pfnMMIOHandlerRC);
- AssertLogRelRCReturn(rc, rc);
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "IOMMMIOHandler", &pVM->iom.s.pfnMMIOHandlerRC);
+ AssertLogRelRCReturn(rc, rc);
+ }
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "IOMMMIOHandler", &pVM->iom.s.pfnMMIOHandlerR0);
AssertLogRelRCReturn(rc, rc);
}
@@ -1470,28 +1428,26 @@ IOMR3MmioRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
/*
* Try register it with PGM and then insert it into the tree.
*/
- IOM_LOCK(pVM);
- iomR3FlushCache(pVM);
rc = PGMR3PhysMMIORegister(pVM, GCPhysStart, cbRange,
IOMR3MMIOHandler, pRange,
pVM->iom.s.pfnMMIOHandlerR0, MMHyperR3ToR0(pVM, pRange),
pVM->iom.s.pfnMMIOHandlerRC, MMHyperR3ToRC(pVM, pRange), pszDesc);
if (RT_SUCCESS(rc))
{
+ IOM_LOCK_EXCL(pVM);
if (RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRange->Core))
{
- IOM_UNLOCK(pVM);
+ iomR3FlushCache(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
/* bail out */
- IOM_UNLOCK(pVM);
- DBGFR3Info(pVM, "mmio", NULL, NULL);
+ IOM_UNLOCK_EXCL(pVM);
+ DBGFR3Info(pVM->pUVM, "mmio", NULL, NULL);
AssertMsgFailed(("This cannot happen!\n"));
rc = VERR_IOM_IOPORT_IPE_3;
}
- else
- IOM_UNLOCK(pVM);
MMHyperFree(pVM, pRange);
}
@@ -1518,6 +1474,7 @@ IOMR3MmioRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
* @param pfnWriteCallback Pointer to function which is gonna handle Write operations.
* @param pfnReadCallback Pointer to function which is gonna handle Read operations.
* @param pfnFillCallback Pointer to function which is gonna handle Fill/memset operations.
+ * @thread EMT
*/
VMMR3_INT_DECL(int)
IOMR3MmioRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t cbRange, RTGCPTR pvUser,
@@ -1526,6 +1483,7 @@ IOMR3MmioRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
{
LogFlow(("IOMR3MmioRegisterRC: pDevIns=%p GCPhysStart=%RGp cbRange=%#x pvUser=%RGv pfnWriteCallback=%#x pfnReadCallback=%#x pfnFillCallback=%#x\n",
pDevIns, GCPhysStart, cbRange, pvUser, pfnWriteCallback, pfnReadCallback, pfnFillCallback));
+ AssertReturn(!HMIsEnabled(pVM), VERR_IOM_HM_IPE);
/*
* Validate input.
@@ -1535,23 +1493,24 @@ IOMR3MmioRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
AssertMsgFailed(("No callbacks! %RGp LB%#x %s\n", GCPhysStart, cbRange));
return VERR_INVALID_PARAMETER;
}
+ PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);
/*
* Find the MMIO range and check that the input matches.
*/
- IOM_LOCK(pVM);
- PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhysStart);
- AssertReturnStmt(pRange, IOM_UNLOCK(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
- AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
- AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK(pVM), VERR_IOM_INVALID_MMIO_RANGE);
- AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK(pVM), VERR_IOM_INVALID_MMIO_RANGE);
+ IOM_LOCK_EXCL(pVM);
+ PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysStart);
+ AssertReturnStmt(pRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
+ AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK_EXCL(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
+ AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
+ AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
pRange->pvUserRC = pvUser;
pRange->pfnReadCallbackRC = pfnReadCallback;
pRange->pfnWriteCallbackRC= pfnWriteCallback;
pRange->pfnFillCallbackRC = pfnFillCallback;
pRange->pDevInsRC = MMHyperCCToRC(pVM, pDevIns);
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
@@ -1574,6 +1533,7 @@ IOMR3MmioRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
* @param pfnWriteCallback Pointer to function which is gonna handle Write operations.
* @param pfnReadCallback Pointer to function which is gonna handle Read operations.
* @param pfnFillCallback Pointer to function which is gonna handle Fill/memset operations.
+ * @thread EMT
*/
VMMR3_INT_DECL(int)
IOMR3MmioRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t cbRange, RTR0PTR pvUser,
@@ -1592,23 +1552,24 @@ IOMR3MmioRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, uint32_t
AssertMsgFailed(("No callbacks! %RGp LB%#x %s\n", GCPhysStart, cbRange));
return VERR_INVALID_PARAMETER;
}
+ PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);
/*
* Find the MMIO range and check that the input matches.
*/
- IOM_LOCK(pVM);
- PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhysStart);
- AssertReturnStmt(pRange, IOM_UNLOCK(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
- AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
- AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK(pVM), VERR_IOM_INVALID_MMIO_RANGE);
- AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK(pVM), VERR_IOM_INVALID_MMIO_RANGE);
+ IOM_LOCK_EXCL(pVM);
+ PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysStart);
+ AssertReturnStmt(pRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
+ AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK_EXCL(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
+ AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
+ AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
pRange->pvUserR0 = pvUser;
pRange->pfnReadCallbackR0 = pfnReadCallback;
pRange->pfnWriteCallbackR0= pfnWriteCallback;
pRange->pfnFillCallbackR0 = pfnFillCallback;
pRange->pDevInsR0 = MMHyperCCToR0(pVM, pDevIns);
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
@@ -1642,8 +1603,9 @@ VMMR3_INT_DECL(int) IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GC
AssertMsgFailed(("Wrapped! %#x LB%#x\n", GCPhysStart, cbRange));
return VERR_IOM_INVALID_MMIO_RANGE;
}
+ PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
/*
* Check ownership and such for the entire area.
@@ -1651,19 +1613,19 @@ VMMR3_INT_DECL(int) IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GC
RTGCPHYS GCPhys = GCPhysStart;
while (GCPhys <= GCPhysLast && GCPhys >= GCPhysStart)
{
- PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
+ PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
if (!pRange)
{
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VERR_IOM_MMIO_RANGE_NOT_FOUND;
}
AssertMsgReturnStmt(pRange->pDevInsR3 == pDevIns,
("Not owner! GCPhys=%RGp %RGp LB%#x %s\n", GCPhys, GCPhysStart, cbRange, pRange->pszDesc),
- IOM_UNLOCK(pVM),
+ IOM_UNLOCK_EXCL(pVM),
VERR_IOM_NOT_MMIO_RANGE_OWNER);
AssertMsgReturnStmt(pRange->Core.KeyLast <= GCPhysLast,
("Incomplete R3 range! GCPhys=%RGp %RGp LB%#x %s\n", GCPhys, GCPhysStart, cbRange, pRange->pszDesc),
- IOM_UNLOCK(pVM),
+ IOM_UNLOCK_EXCL(pVM),
VERR_IOM_INCOMPLETE_MMIO_RANGE);
/* next */
@@ -1682,13 +1644,13 @@ VMMR3_INT_DECL(int) IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GC
PIOMMMIORANGE pRange = (PIOMMMIORANGE)RTAvlroGCPhysRemove(&pVM->iom.s.pTreesR3->MMIOTree, GCPhys);
Assert(pRange);
Assert(pRange->Core.Key == GCPhys && pRange->Core.KeyLast <= GCPhysLast);
- IOM_UNLOCK(pVM); /** @todo r=bird: Why are we leaving the lock here? We don't leave it when registering the range above... */
+ IOM_UNLOCK_EXCL(pVM); /* Lock order fun. */
/* remove it from PGM */
int rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRange->cb);
AssertRC(rc);
- IOM_LOCK(pVM);
+ IOM_LOCK_EXCL(pVM);
/* advance and free. */
GCPhys = pRange->Core.KeyLast + 1;
@@ -1700,7 +1662,7 @@ VMMR3_INT_DECL(int) IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GC
iomMmioReleaseRange(pVM, pRange);
}
- IOM_UNLOCK(pVM);
+ IOM_UNLOCK_EXCL(pVM);
return VINF_SUCCESS;
}
diff --git a/src/VBox/VMM/VMMR3/MM.cpp b/src/VBox/VMM/VMMR3/MM.cpp
index b317c29c..a5987395 100644
--- a/src/VBox/VMM/VMMR3/MM.cpp
+++ b/src/VBox/VMM/VMMR3/MM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/MMHeap.cpp b/src/VBox/VMM/VMMR3/MMHeap.cpp
index 39e3249e..9c52ac6a 100644
--- a/src/VBox/VMM/VMMR3/MMHeap.cpp
+++ b/src/VBox/VMM/VMMR3/MMHeap.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/MMHyper.cpp b/src/VBox/VMM/VMMR3/MMHyper.cpp
index 86db83a8..9143d988 100644
--- a/src/VBox/VMM/VMMR3/MMHyper.cpp
+++ b/src/VBox/VMM/VMMR3/MMHyper.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,6 +22,7 @@
#define LOG_GROUP LOG_GROUP_MM_HYPER
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/dbgf.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
@@ -55,9 +56,6 @@ static uint32_t mmR3ComputeHyperHeapSize(PVM pVM)
/*
* Gather parameters.
*/
- bool const fHwVirtExtForced = VMMIsHwVirtExtForced(pVM)
- || pVM->cCpus > 1;
-
bool fCanUseLargerHeap;
int rc = CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "CanUseLargerHeap", &fCanUseLargerHeap, false);
AssertStmt(RT_SUCCESS(rc), fCanUseLargerHeap = false);
@@ -71,7 +69,7 @@ static uint32_t mmR3ComputeHyperHeapSize(PVM pVM)
* so lets filter out that case first.
*/
if ( !fCanUseLargerHeap
- && !fHwVirtExtForced
+ && !HMIsEnabled(pVM)
&& cbRam < 16*_1G64)
return 1280 * _1K;
@@ -810,17 +808,14 @@ static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, P
0 /*fFlags*/,
&pv,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
+ &pvR0,
#else
NULL,
#endif
paPages);
if (RT_SUCCESS(rc))
{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- if (!VMMIsHwVirtExtForced(pVM))
- pvR0 = NIL_RTR0PTR;
-#else
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pvR0 = (uintptr_t)pv;
#endif
memset(pv, 0, cbAligned);
@@ -974,7 +969,7 @@ VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment
/*
* Set MMHYPER_AONR_FLAGS_KERNEL_MAPPING if we're in going to execute in ring-0.
*/
- if (VMMIsHwVirtExtForced(pVM))
+ if (HMIsEnabled(pVM))
fFlags |= MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
#endif
diff --git a/src/VBox/VMM/VMMR3/MMPagePool.cpp b/src/VBox/VMM/VMMR3/MMPagePool.cpp
index 9491fafe..e40bb3d4 100644
--- a/src/VBox/VMM/VMMR3/MMPagePool.cpp
+++ b/src/VBox/VMM/VMMR3/MMPagePool.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/MMUkHeap.cpp b/src/VBox/VMM/VMMR3/MMUkHeap.cpp
index 255b43de..2c60d01d 100644
--- a/src/VBox/VMM/VMMR3/MMUkHeap.cpp
+++ b/src/VBox/VMM/VMMR3/MMUkHeap.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2009 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/PATM.cpp b/src/VBox/VMM/VMMR3/PATM.cpp
index 51a766c0..64773715 100644
--- a/src/VBox/VMM/VMMR3/PATM.cpp
+++ b/src/VBox/VMM/VMMR3/PATM.cpp
@@ -2,11 +2,11 @@
/** @file
* PATM - Dynamic Guest OS Patching Manager
*
- * NOTE: Never ever reuse patch memory!!
+ * @note Never ever reuse patch memory!!
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -29,16 +29,18 @@
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cfgm.h>
#include <VBox/param.h>
#include <VBox/vmm/selm.h>
+#include <VBox/vmm/csam.h>
#include <iprt/avl.h>
#include "PATMInternal.h"
#include "PATMPatch.h"
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/csam.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
@@ -107,11 +109,13 @@ static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t
static int patmReinit(PVM pVM);
static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
+static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
+static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
-static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD patmr3CmdOn;
+static FNDBGCCMD patmr3CmdOff;
/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
@@ -131,10 +135,22 @@ static unsigned int cIDTHandlersDisabled = 0;
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PATMR3Init(PVM pVM)
+VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
{
int rc;
+ /*
+ * We only need a saved state dummy loader if HM is enabled.
+ */
+ if (HMIsEnabled(pVM))
+ {
+ pVM->fPATMEnabled = false;
+ return SSMR3RegisterStub(pVM, "PATM", 0);
+ }
+
+ /*
+ * Raw-mode.
+ */
Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
/* These values can't change as they are hardcoded in patch code (old saved states!) */
@@ -162,6 +178,8 @@ VMMR3DECL(int) PATMR3Init(PVM pVM)
pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
+ patmR3DbgInit(pVM);
+
/*
* Hypervisor memory for GC status data (read/write)
*
@@ -307,8 +325,11 @@ VMMR3DECL(int) PATMR3Init(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
+VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
{
+ if (HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
/* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
if (RT_FAILURE(rc))
@@ -404,6 +425,7 @@ static int patmReinit(PVM pVM)
pVM->patm.s.fOutOfMemory = false;
pVM->patm.s.pfnHelperCallGC = 0;
+ patmR3DbgReset(pVM);
/* Generate all global functions to be used by future patches. */
/* We generate a fake patch in order to use the existing code for relocation. */
@@ -424,6 +446,8 @@ static int patmReinit(PVM pVM)
pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
/* Round to next 8 byte boundary. */
pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
+
+
return rc;
}
@@ -437,8 +461,11 @@ static int patmReinit(PVM pVM)
*
* @param pVM The VM.
*/
-VMMR3DECL(void) PATMR3Relocate(PVM pVM)
+VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
{
+ if (HMIsEnabled(pVM))
+ return;
+
RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
@@ -492,10 +519,14 @@ VMMR3DECL(void) PATMR3Relocate(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PATMR3Term(PVM pVM)
+VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
{
+ if (HMIsEnabled(pVM))
+ return VINF_SUCCESS;
+
+ patmR3DbgTerm(pVM);
+
/* Memory was all allocated from the two MM heaps and requires no freeing. */
- NOREF(pVM);
return VINF_SUCCESS;
}
@@ -506,18 +537,18 @@ VMMR3DECL(int) PATMR3Term(PVM pVM)
* @returns VBox status code.
* @param pVM The VM which is reset.
*/
-VMMR3DECL(int) PATMR3Reset(PVM pVM)
+VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
{
Log(("PATMR3Reset\n"));
+ if (HMIsEnabled(pVM))
+ return VINF_SUCCESS;
/* Free all patches. */
- while (true)
+ for (;;)
{
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
if (pPatchRec)
- {
- PATMRemovePatch(pVM, pPatchRec, true);
- }
+ patmR3RemovePatch(pVM, pPatchRec, true);
else
break;
}
@@ -920,8 +951,8 @@ DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void
return VINF_PGM_HANDLER_DO_DEFAULT;
}
-
#ifdef VBOX_WITH_DEBUGGER
+
/**
* Callback function for RTAvloU32DoWithAll
*
@@ -938,10 +969,8 @@ static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
return 0;
}
-#endif /* VBOX_WITH_DEBUGGER */
-#ifdef VBOX_WITH_DEBUGGER
/**
* Callback function for RTAvloU32DoWithAll
*
@@ -958,20 +987,23 @@ static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
return 0;
}
-#endif
+
+#endif /* VBOX_WITH_DEBUGGER */
+#ifdef UNUSED_FUNCTIONS
/**
* Returns the host context pointer and size of the patch memory block
*
- * @returns VBox status code.
+ * @returns Host context pointer.
* @param pVM Pointer to the VM.
* @param pcb Size of the patch memory block
+ * @internal
*/
-VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
+VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
{
+ AssertReturn(!HMIsEnabled(pVM), NULL);
if (pcb)
*pcb = pVM->patm.s.cbPatchMem;
-
return pVM->patm.s.pPatchMemHC;
}
@@ -979,18 +1011,19 @@ VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
/**
* Returns the guest context pointer and size of the patch memory block
*
- * @returns VBox status code.
+ * @returns Guest context pointer.
* @param pVM Pointer to the VM.
* @param pcb Size of the patch memory block
*/
-VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
+VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
{
+ AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
if (pcb)
*pcb = pVM->patm.s.cbPatchMem;
-
return pVM->patm.s.pPatchMemGC;
}
+#endif /* UNUSED_FUNCTIONS */
/**
* Returns the host context pointer of the GC context structure
@@ -998,62 +1031,78 @@ VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
+VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
{
+ AssertReturn(!HMIsEnabled(pVM), NULL);
return pVM->patm.s.pGCStateHC;
}
+#ifdef UNUSED_FUNCTION
/**
* Checks whether the HC address is part of our patch region
*
- * @returns VBox status code.
+ * @returns true/false.
* @param pVM Pointer to the VM.
- * @param pAddrGC Guest context address
+ * @param pAddrHC Host context ring-3 address to check.
*/
-VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
+VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
{
- return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
+ return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
+ && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
}
+#endif
/**
* Allows or disallow patching of privileged instructions executed by the guest OS
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param fAllowPatching Allow/disallow patching
+ * @param pUVM The user mode VM handle.
+ * @param fAllowPatching Allow/disallow patching
*/
-VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
+VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
{
- pVM->fPATMEnabled = (fAllowPatching) ? true : false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ if (!HMIsEnabled(pVM))
+ pVM->fPATMEnabled = fAllowPatching;
+ else
+ Assert(!pVM->fPATMEnabled);
return VINF_SUCCESS;
}
+
/**
- * Convert a GC patch block pointer to a HC patch pointer
+ * Checks if the patch manager is enabled or not.
*
- * @returns HC pointer or NULL if it's not a GC patch pointer
- * @param pVM Pointer to the VM.
- * @param pAddrGC GC pointer
+ * @returns true if enabled, false if not (or if invalid handle).
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
+VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
{
- if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
- return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
- else
- return NULL;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
+ return PATMIsEnabled(pVM);
}
+
/**
- * Query PATM state (enabled/disabled)
+ * Convert a GC patch block pointer to a HC patch pointer
*
- * @returns 0 - disabled, 1 - enabled
+ * @returns HC pointer or NULL if it's not a GC patch pointer
* @param pVM Pointer to the VM.
+ * @param pAddrGC GC pointer
*/
-VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
+VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
{
- return pVM->fPATMEnabled;
+ AssertReturn(!HMIsEnabled(pVM), NULL);
+ if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
+ return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
+ return NULL;
}
@@ -1068,7 +1117,7 @@ VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
* @returns Host context pointer or NULL in case of an error
*
*/
-R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
+R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
{
int rc;
R3PTRTYPE(uint8_t *) pHCPtr;
@@ -1104,7 +1153,8 @@ R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RC
}
-/* Calculates and fills in all branch targets
+/**
+ * Calculates and fills in all branch targets
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
@@ -1140,7 +1190,7 @@ static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
{
/* Special case: call function replacement patch from this patch block.
*/
- PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
+ PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
if (!pFunctionRec)
{
int rc;
@@ -1200,7 +1250,8 @@ static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
return VINF_SUCCESS;
}
-/* Add an illegal instruction record
+/**
+ * Add an illegal instruction record
*
* @param pVM Pointer to the VM.
* @param pPatch Patch structure ptr
@@ -1241,9 +1292,10 @@ static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
* @param enmType Lookup type
* @param fDirty Dirty flag
*
+ * @note Be extremely careful with this function. Make absolutely sure the guest
+ * address is correct! (to avoid executing instructions twice!)
*/
- /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
-void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
+void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
{
bool ret;
PRECPATCHTOGUEST pPatchToGuestRec;
@@ -1348,7 +1400,7 @@ static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
* @param pVM Pointer to the VM.
* @param ppTree Tree to empty
*/
-void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
+static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
{
NOREF(pVM);
RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
@@ -1370,7 +1422,7 @@ static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
* @param pVM Pointer to the VM.
* @param ppTree Tree to empty
*/
-void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
+static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
{
NOREF(pVM);
RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
@@ -1520,6 +1572,11 @@ static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_
case OP_JMP:
break;
+#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+ case OP_STR:
+ break;
+#endif
+
default:
if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
{
@@ -1631,6 +1688,11 @@ static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uin
case OP_RETN:
return VINF_SUCCESS;
+#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+ case OP_STR:
+ break;
+#endif
+
case OP_POPF:
case OP_STI:
return VWRN_CONTINUE_ANALYSIS;
@@ -1693,7 +1755,7 @@ static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
+ patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
/* Update lowest and highest instruction address for this patch */
if (pCurInstrGC < pPatch->pInstrGCLowest)
@@ -1791,6 +1853,7 @@ static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
goto duplicate_instr;
case OP_POP:
+ /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
{
Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
@@ -1825,7 +1888,7 @@ static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
{ /* Force pNextInstrHC out of scope after using it */
- uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
+ uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
if (pNextInstrHC == NULL)
{
AssertFailed();
@@ -1898,6 +1961,7 @@ static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
break;
case OP_PUSH:
+ /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
{
rc = patmPatchGenPushCS(pVM, pPatch);
@@ -1932,6 +1996,10 @@ static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *
break;
case OP_STR:
+#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
+ /* Now safe because our shadow TR entry is identical to the guest's. */
+ goto duplicate_instr;
+#endif
case OP_SLDT:
rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
if (RT_SUCCESS(rc))
@@ -2087,7 +2155,8 @@ end:
#ifdef LOG_ENABLED
-/* Add a disasm jump record (temporary for prevent duplicate analysis)
+/**
+ * Add a disasm jump record (temporary for prevent duplicate analysis)
*
* @param pVM Pointer to the VM.
* @param pPatch Patch structure ptr
@@ -2149,7 +2218,7 @@ int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstr
pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
{ /* Force pOrgJumpHC out of scope after using it */
- uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
+ uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
@@ -2210,7 +2279,7 @@ int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *
while (rc == VWRN_CONTINUE_ANALYSIS)
{
- pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
if (pCurInstrHC == NULL)
{
rc = VERR_PATCHING_REFUSED;
@@ -2342,13 +2411,12 @@ int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uin
* @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
*
*/
-VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
+VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
{
- PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
+ PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
if (pTargetPatch)
- {
return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
- }
return VERR_PATCH_NO_CONFLICT;
}
@@ -2377,7 +2445,7 @@ static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTR
while (rc == VWRN_CONTINUE_RECOMPILE)
{
- pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
if (pCurInstrHC == NULL)
{
rc = VERR_PATCHING_REFUSED; /* fatal in this case */
@@ -2395,7 +2463,7 @@ static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTR
Log(("Disassembly failed (probably page not present) -> return to caller\n"));
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
+ patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
patmPatchGenIllegalInstr(pVM, pPatch);
rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
goto end;
@@ -2418,7 +2486,7 @@ static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTR
/* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
* Recompile the next instruction as well
*/
- pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
+ pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
if (pNextInstrHC == NULL)
{
rc = VERR_PATCHING_REFUSED; /* fatal in this case */
@@ -2511,7 +2579,7 @@ static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTR
*
* We rely on CSAM to detect and resolve conflicts
*/
- PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
+ PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
if(pTargetPatch)
{
Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
@@ -2566,7 +2634,7 @@ static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCac
Assert(pPatch->cbPatchJump <= sizeof(temp));
Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
- pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
+ pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
Assert(pPB);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
@@ -2706,7 +2774,7 @@ static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPA
Assert(pPatch->cbPatchJump <= sizeof(temp));
- pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
+ pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
Assert(pPB);
Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
@@ -2746,8 +2814,8 @@ static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPA
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
- uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
+static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
+ uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
{
PPATCHINFO pPatch = &pPatchRec->patch;
int rc = VERR_PATCHING_REFUSED;
@@ -2776,7 +2844,7 @@ VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *)
default:
if (!(pPatch->flags & PATMFL_IDTHANDLER))
{
- AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
+ AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
return VERR_INVALID_PARAMETER;
}
}
@@ -2849,7 +2917,7 @@ VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *)
{
/* Most likely cause: we encountered an illegal instruction very early on. */
/** @todo could turn it into an int3 callable patch. */
- Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
+ Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
rc = VERR_PATCHING_REFUSED;
goto failure;
}
@@ -2906,7 +2974,7 @@ VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *)
{
/*uint8_t bASMInt3 = 0xCC; - unused */
- Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
+ Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
/* Replace first opcode byte with 'int 3'. */
rc = patmActivateInt3Patch(pVM, pPatch);
if (RT_FAILURE(rc))
@@ -2930,6 +2998,8 @@ VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *)
}
+ patmR3DbgAddPatch(pVM, pPatchRec);
+
PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
@@ -2987,7 +3057,7 @@ static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATC
uint8_t *pCurInstrHC, *pInstrHC;
uint32_t orgOffsetPatchMem = ~0;
- pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
+ pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
/*
@@ -3042,7 +3112,7 @@ static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATC
goto failure;
/* Add lookup record for patch to guest address translation (for the push) */
- patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
+ patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
/* Duplicate push. */
rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
@@ -3080,6 +3150,7 @@ static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATC
pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
+ patmR3DbgAddPatch(pVM, pPatchRec);
pPatch->uState = PATCH_ENABLED;
@@ -3091,7 +3162,7 @@ failure:
if (orgOffsetPatchMem != (uint32_t)~0)
pVM->patm.s.offPatchMem = orgOffsetPatchMem;
- return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
+ return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
}
/**
@@ -3154,6 +3225,7 @@ static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pP
pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
+ patmR3DbgAddPatch(pVM, pPatchRec);
pPatch->uState = PATCH_ENABLED;
return VINF_SUCCESS;
@@ -3283,6 +3355,8 @@ static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatch
goto failure;
}
+ patmR3DbgAddPatch(pVM, pPatchRec);
+
#ifdef LOG_ENABLED
Log(("Patch code ----------------------------------------------------------\n"));
patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
@@ -3433,6 +3507,7 @@ static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchR
rc = VERR_PATCHING_REFUSED;
goto failure;
}
+ patmR3DbgAddPatch(pVM, pPatchRec);
/* size of patch block */
pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
@@ -3495,11 +3570,12 @@ failure:
* @param pCtx Pointer to the guest CPU context.
*
*/
-VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
+VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
{
RTRCPTR pBranchTarget, pPage;
int rc;
RTRCPTR pPatchTargetGC = 0;
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
pBranchTarget = pCtx->edx;
pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
@@ -3560,7 +3636,7 @@ VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
}
Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
- rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
+ rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
AssertRC(rc);
pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
@@ -3615,7 +3691,7 @@ static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC,
*/
uint8_t *pTmpInstrHC;
- pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
+ pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
Assert(pTmpInstrHC);
if (pTmpInstrHC == 0)
break;
@@ -3695,12 +3771,13 @@ static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPAT
if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
goto failure;
- pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
+ pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
if (pPB == 0)
goto failure;
/* Add relocation record for cached data access. */
- if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
+ if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
+ pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
{
Log(("Relocation failed for cached mmio address!!\n"));
return VERR_PATCHING_REFUSED;
@@ -3714,7 +3791,8 @@ static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPAT
pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
/* Replace address with that of the cached item. */
- rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
+ rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
+ &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
AssertRC(rc);
if (RT_FAILURE(rc))
{
@@ -3852,14 +3930,13 @@ static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
- PPATCHINFO pPatch)
+int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
{
uint8_t bASMInt3 = 0xCC;
int rc;
/* Note: Do not use patch memory here! It might called during patch installation too. */
- PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "PATMR3PatchInstrInt3:", "");
+ PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
/* Save the original instruction. */
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
@@ -3968,9 +4045,9 @@ int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISC
* A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
* references the target instruction in the conflict patch.
*/
- RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
+ RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
- AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
+ AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
pPatch->pPatchJumpDestGC = pJmpDest;
PATMP2GLOOKUPREC cacheRec;
@@ -4019,7 +4096,7 @@ failure:
* @param pInstr Guest context point to privileged instruction
* @param flags Patch flags
*/
-VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
+VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
{
Assert(pInstrGC);
Assert(flags == PATMFL_CODE32);
@@ -4038,7 +4115,7 @@ VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
*
* @note returns failure if patching is not allowed or possible
*/
-VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
+VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
{
DISCPUSTATE cpu;
R3PTRTYPE(uint8_t *) pInstrHC;
@@ -4050,6 +4127,8 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
PVMCPU pVCpu = VMMGetCpu0(pVM);
LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
+
if ( !pVM
|| pInstrGC == 0
|| (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
@@ -4064,7 +4143,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
/* Test for patch conflict only with patches that actually change guest code. */
if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
{
- PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
+ PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
if (pConflictPatch != 0)
return VERR_PATCHING_REFUSED;
@@ -4081,7 +4160,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
if (pVM->patm.s.fOutOfMemory == true)
return VERR_PATCHING_REFUSED;
-#if 0 /* DONT COMMIT ENABLED! */
+#if 1 /* DONT COMMIT ENABLED! */
/* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
if ( 0
//|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
@@ -4231,7 +4310,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
PATMP2GLOOKUPREC cacheRec;
RT_ZERO(cacheRec);
- pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
+ pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
/* Allocate patch record. */
@@ -4336,7 +4415,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
else
if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
{
- rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
+ rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
}
else
if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
@@ -4365,7 +4444,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
{
case OP_SYSENTER:
case OP_PUSH:
- rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
+ rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
if (rc == VINF_SUCCESS)
{
if (rc == VINF_SUCCESS)
@@ -4384,7 +4463,7 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
switch (cpu.pCurInstr->uOpcode)
{
case OP_SYSENTER:
- rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
+ rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
if (rc == VINF_SUCCESS)
{
Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
@@ -4425,10 +4504,12 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
case OP_PUSHF:
case OP_CLI:
Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
- rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
+ rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
break;
+#ifndef VBOX_WITH_SAFE_STR
case OP_STR:
+#endif
case OP_SGDT:
case OP_SLDT:
case OP_SIDT:
@@ -4439,7 +4520,10 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
case OP_VERW:
case OP_VERR:
case OP_IRET:
- rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
+#ifdef VBOX_WITH_RAW_RING1
+ case OP_MOV:
+#endif
+ rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
break;
default:
@@ -4509,6 +4593,9 @@ VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
#endif
}
#endif
+
+ /* Add debug symbol. */
+ patmR3DbgAddPatch(pVM, pPatchRec);
}
/* Free leftover lock if any. */
if (cacheRec.Lock.pvMap)
@@ -4772,7 +4859,7 @@ int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
* @param pVM Pointer to the VM.
* @param pPatch Patch record
*/
-int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
+static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
{
int rc;
RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
@@ -4807,13 +4894,14 @@ int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
* @param cbWrite Nr of bytes to write
*
*/
-VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
+VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
{
RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
Assert(VM_IS_EMT(pVM));
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
/* Quick boundary check */
if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
@@ -4904,7 +4992,7 @@ loop_start:
{
LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
- PATMR3MarkDirtyPatch(pVM, pPatch);
+ patmR3MarkDirtyPatch(pVM, pPatch);
/* Note: jump back to the start as the pPatchPage has been deleted or changed */
goto loop_start;
@@ -4957,7 +5045,7 @@ invalid_write_loop_start:
else
{
LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
- PATMR3MarkDirtyPatch(pVM, pPatch);
+ patmR3MarkDirtyPatch(pVM, pPatch);
}
/* Note: jump back to the start as the pPatchPage has been deleted or changed */
goto invalid_write_loop_start;
@@ -4978,11 +5066,13 @@ invalid_write_loop_start:
* @returns VBox status code
* @param pVM Pointer to the VM.
* @param addr GC address of the page to flush
+ * @note Currently only called by CSAMR3FlushPage; optimization to avoid
+ * having to double check if the physical address has changed
*/
-/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
- */
-VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
+VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
{
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
+
addr &= PAGE_BASE_GC_MASK;
PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
@@ -4998,7 +5088,7 @@ VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
PPATCHINFO pPatch = pPatchPage->papPatch[i];
Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
- PATMR3MarkDirtyPatch(pVM, pPatch);
+ patmR3MarkDirtyPatch(pVM, pPatch);
}
}
STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
@@ -5013,8 +5103,9 @@ VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
* @param pVM Pointer to the VM.
* @param pInstrGC Guest context pointer to instruction
*/
-VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
{
+ Assert(!HMIsEnabled(pVM));
PPATMPATCHREC pPatchRec;
pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
@@ -5038,12 +5129,12 @@ VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
/** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
/* Shortcut. */
- if ( !PATMIsEnabled(pVM)
- || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
- || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
- {
+ if (!PATMIsEnabled(pVM))
+ return VERR_PATCH_NOT_FOUND;
+ Assert(!HMIsEnabled(pVM));
+ if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
+ || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
return VERR_PATCH_NOT_FOUND;
- }
pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
// if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
@@ -5076,11 +5167,13 @@ VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
* @param cbToRead The maximum number bytes to read.
* @param pcbRead Where to return the acutal number of bytes read.
*/
-VMMR3DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
+VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
{
/* Shortcut. */
- if ( !PATMIsEnabled(pVM)
- || GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
+ if (!PATMIsEnabled(pVM))
+ return VERR_PATCH_NOT_FOUND;
+ Assert(!HMIsEnabled(pVM));
+ if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
|| GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
return VERR_PATCH_NOT_FOUND;
@@ -5133,12 +5226,13 @@ VMMR3DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst,
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
{
PPATMPATCHREC pPatchRec;
PPATCHINFO pPatch;
Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
if (pPatchRec)
{
@@ -5292,7 +5386,7 @@ static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflict
int rc;
RT_ZERO(patch);
- pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
+ pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
/*
* If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
@@ -5379,12 +5473,13 @@ static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflict
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
{
PPATMPATCHREC pPatchRec;
PPATCHINFO pPatch;
Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
if (pPatchRec)
{
@@ -5498,7 +5593,7 @@ VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
* @param pPatchRec Patch record
* @param fForceRemove Remove *all* patches
*/
-int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
+int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
{
PPATCHINFO pPatch;
@@ -5532,23 +5627,8 @@ int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
#ifdef VBOX_WITH_STATISTICS
if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
{
- STAMR3Deregister(pVM, &pPatchRec->patch);
-#ifndef DEBUG_sandervl
- STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
- STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
- STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
- STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
- STAMR3Deregister(pVM, &pPatchRec->patch.flags);
- STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
- STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
- STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
- STAMR3Deregister(pVM, &pPatchRec->patch.uState);
- STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
- STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
-#endif
+ STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
+ STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
}
#endif
@@ -5650,33 +5730,18 @@ int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
}
- /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
+ /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
rc = PATMR3DisablePatch(pVM, pInstrGC);
AssertRC(rc);
- /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
+ /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
#ifdef VBOX_WITH_STATISTICS
if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
{
- STAMR3Deregister(pVM, &pPatchRec->patch);
-#ifndef DEBUG_sandervl
- STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
- STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
- STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
- STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
- STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
- STAMR3Deregister(pVM, &pPatchRec->patch.flags);
- STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
- STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
- STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
- STAMR3Deregister(pVM, &pPatchRec->patch.uState);
- STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
- STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
-#endif
+ STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
+ STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
}
#endif
@@ -5710,7 +5775,7 @@ int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
Assert(pNewPatchRec); /* can't fail */
/* Remove old patch (only do that when everything is finished) */
- int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
+ int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
AssertRC(rc2);
/* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
@@ -5787,7 +5852,7 @@ failure:
* @param fIncludeHints Include hinted patches or not
*
*/
-PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
+PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
{
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
/* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
@@ -5820,14 +5885,15 @@ PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncl
*
* @returns true -> yes, false -> no
* @param pVM Pointer to the VM.
- * @param pAddr Guest context address
- * @param pPatchAddr Guest context patch address (if true)
+ * @param pAddr Guest context address.
+ * @param pPatchAddr Guest context patch address (if true).
*/
-VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
+VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
{
RTRCPTR addr;
PPATCHINFO pPatch;
+ Assert(!HMIsEnabled(pVM));
if (PATMIsEnabled(pVM) == false)
return false;
@@ -5836,7 +5902,7 @@ VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatc
*pPatchAddr = 0;
- pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
+ pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
if (pPatch)
*pPatchAddr = pPatch->pPrivInstrGC;
@@ -5853,10 +5919,11 @@ VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatc
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
+VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
{
PPATMPATCHREC pPatchRec;
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
if (pPatchRec)
{
@@ -5864,7 +5931,7 @@ VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
- return PATMRemovePatch(pVM, pPatchRec, false);
+ return patmR3RemovePatch(pVM, pPatchRec, false);
}
AssertFailed();
return VERR_PATCH_NOT_FOUND;
@@ -5880,7 +5947,7 @@ VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
* @note returns failure if patching is not allowed or possible
*
*/
-VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
+static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
{
if (pPatch->pPatchBlockOffset)
{
@@ -5931,7 +5998,8 @@ RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t
return 0;
}
-/* Converts Guest code GC ptr to Patch code GC ptr (if found)
+/**
+ * Converts Guest code GC ptr to Patch code GC ptr (if found)
*
* @returns corresponding GC pointer in patch block
* @param pVM Pointer to the VM.
@@ -5951,37 +6019,37 @@ RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t
return 0;
}
-/* Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
+/**
+ * Converts Guest code GC ptr to Patch code GC ptr (if found)
*
* @returns corresponding GC pointer in patch block
* @param pVM Pointer to the VM.
- * @param pPatch Current patch block pointer
* @param pInstrGC Guest context pointer to privileged instruction
- *
*/
-RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
+static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
{
- PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
- if (pGuestToPatchRec)
- return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
-
- return 0;
+ PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
+ if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
+ return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
+ return NIL_RTRCPTR;
}
-/* Converts Guest code GC ptr to Patch code GC ptr (if found)
+/**
+ * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
+ * identical match)
*
* @returns corresponding GC pointer in patch block
* @param pVM Pointer to the VM.
+ * @param pPatch Current patch block pointer
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
-VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
+RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
{
- PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
- if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
- return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
- else
- return 0;
+ PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
+ if (pGuestToPatchRec)
+ return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
+ return NIL_RTRCPTR;
}
/**
@@ -5993,13 +6061,14 @@ VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pIn
* @param pEnmState State of the translated address (out)
*
*/
-VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
+VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
{
PPATMPATCHREC pPatchRec;
void *pvPatchCoreOffset;
RTRCPTR pPrivInstrGC;
Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
+ Assert(!HMIsEnabled(pVM));
pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
if (pvPatchCoreOffset == 0)
{
@@ -6037,7 +6106,7 @@ VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE
*pEnmState = PATMTRANS_OVERWRITTEN;
}
else
- if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
+ if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
{
*pEnmState = PATMTRANS_OVERWRITTEN;
}
@@ -6059,17 +6128,18 @@ VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE
* @param pVM Pointer to the VM.
* @param pAddrGC Guest context address
*/
-VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
+VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
{
PPATMPATCHREC pPatchRec;
+ Assert(!HMIsEnabled(pVM));
+
/* Find the patch record. */
pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
/** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
return PATCHCODE_PTR_GC(&pPatchRec->patch);
- else
- return 0;
+ return NIL_RTRCPTR;
}
/**
@@ -6129,7 +6199,7 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
#ifdef DEBUG
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
szBuf, sizeof(szBuf), NULL);
Log(("DIRTY: %s\n", szBuf));
#endif
@@ -6192,7 +6262,7 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
{
#ifdef DEBUG
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
szBuf, sizeof(szBuf), NULL);
Log(("NEW: %s\n", szBuf));
#endif
@@ -6202,18 +6272,18 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
AssertRC(rc);
/* Add a new lookup record for the duplicated instruction. */
- patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
+ patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
}
else
{
#ifdef DEBUG
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
szBuf, sizeof(szBuf), NULL);
Log(("NEW: %s (FAILED)\n", szBuf));
#endif
/* Restore the old lookup record for the duplicated instruction. */
- patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
+ patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
/** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
rc = VERR_PATCHING_REFUSED;
@@ -6245,8 +6315,8 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
*(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
#ifdef DEBUG
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
- szBuf, sizeof(szBuf), NULL);
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
+ DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
Log(("FILL: %s\n", szBuf));
#endif
}
@@ -6257,7 +6327,7 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
pPatchFillHC[i] = 0x90; /* NOP */
#ifdef DEBUG
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
Log(("FILL: %s\n", szBuf));
#endif
@@ -6308,7 +6378,7 @@ static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch,
* @param pEip GC pointer of trapping instruction.
* @param ppNewEip GC pointer to new instruction.
*/
-VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
+VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
{
PPATMPATCHREC pPatch = 0;
void *pvPatchCoreOffset;
@@ -6318,6 +6388,7 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
PRECPATCHTOGUEST pPatchToGuestRec = 0;
PVMCPU pVCpu = VMMGetCpu0(pVM);
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
Assert(pVM->cCpus == 1);
pNewEip = 0;
@@ -6449,7 +6520,7 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
}
char szBuf[256];
- DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
+ DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
/* Very bad. We crashed in emitted code. Probably stack? */
if (pPatch)
@@ -6507,7 +6578,7 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
RT_ZERO(cacheRec);
cacheRec.pPatch = &pPatch->patch;
- disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
+ disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
&cpu, &cbInstr);
if (cacheRec.Lock.pvMap)
PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
@@ -6546,14 +6617,14 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
RT_ZERO(cacheRec);
cacheRec.pPatch = &pPatch->patch;
- disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
+ disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
&cpu, &cbInstr);
if (cacheRec.Lock.pvMap)
PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
{
- disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
+ disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
&cpu, &cbInstr);
if (cacheRec.Lock.pvMap)
PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
@@ -6566,7 +6637,7 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
}
Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
- DBGFR3DisasInstrLog(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
+ DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
{
/* We can't jump back to code that we've overwritten with a 5 byte jump! */
@@ -6600,10 +6671,11 @@ VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *p
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
+VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
{
- RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
+ AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
+ RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
addr &= PAGE_BASE_GC_MASK;
int rc = PGMHandlerVirtualDeregister(pVM, addr);
@@ -6752,8 +6824,8 @@ RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
}
#endif /* VBOX_WITH_STATISTICS */
-
#ifdef VBOX_WITH_DEBUGGER
+
/**
* The '.patmoff' command.
*
@@ -6764,17 +6836,21 @@ RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
* @param paArgs Pointer to (readonly) array of arguments.
* @param cArgs Number of arguments in the array.
*/
-static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
+ NOREF(cArgs); NOREF(paArgs);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ if (HMIsEnabled(pVM))
+ return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
- PATMR3AllowPatching(pVM, false);
+ PATMR3AllowPatching(pVM->pUVM, false);
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
}
@@ -6788,17 +6864,22 @@ static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM p
* @param paArgs Pointer to (readonly) array of arguments.
* @param cArgs Number of arguments in the array.
*/
-static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
+ NOREF(cArgs); NOREF(paArgs);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ if (HMIsEnabled(pVM))
+ return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
- PATMR3AllowPatching(pVM, true);
+ PATMR3AllowPatching(pVM->pUVM, true);
RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
}
-#endif
+
+#endif /* VBOX_WITH_DEBUGGER */
diff --git a/src/VBox/VMM/VMMR3/PATMA.asm b/src/VBox/VMM/VMMR3/PATMA.asm
index cab836a8..2badb050 100644
--- a/src/VBox/VMM/VMMR3/PATMA.asm
+++ b/src/VBox/VMM/VMMR3/PATMA.asm
@@ -3,7 +3,7 @@
; PATM Assembly Routines.
;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -1261,7 +1261,7 @@ PATMIretStart:
jz near iret_clearIF
; force ring 1 CS RPL
- or dword [esp+8], 1
+ or dword [esp+8], 1 ;-> @todo we leave traces or raw mode if we jump back to the host context to handle pending interrupts! (below)
iret_notring0:
; if interrupts are pending, then we must go back to the host context to handle them!
@@ -1461,6 +1461,304 @@ GLOBALNAME PATMIretRecord
DD 0ffffffffh
SECTION .text
+;;****************************************************
+;; Abstract:
+;;
+;; if eflags.NT==0 && iretstack.eflags.VM==0 && iretstack.eflags.IOPL==0
+;; then
+;; if return to ring 0 (iretstack.new_cs & 3 == 0)
+;; then
+;; if iretstack.new_eflags.IF == 1 && iretstack.new_eflags.IOPL == 0
+;; then
+;; iretstack.new_cs |= 1
+;; else
+;; int 3
+;; endif
+;; uVMFlags &= ~X86_EFL_IF
+;; iret
+;; else
+;; int 3
+;;****************************************************
+;;
+; Stack:
+;
+; esp + 32 - GS (V86 only)
+; esp + 28 - FS (V86 only)
+; esp + 24 - DS (V86 only)
+; esp + 20 - ES (V86 only)
+; esp + 16 - SS (if transfer to outer ring)
+; esp + 12 - ESP (if transfer to outer ring)
+; esp + 8 - EFLAGS
+; esp + 4 - CS
+; esp - EIP
+;;
+BEGINPROC PATMIretRing1Replacement
+PATMIretRing1Start:
+ mov dword [ss:PATM_INTERRUPTFLAG], 0
+ pushfd
+
+%ifdef PATM_LOG_PATCHIRET
+ push eax
+ push ecx
+ push edx
+ lea edx, dword [ss:esp+12+4] ;3 dwords + pushed flags -> iret eip
+ mov eax, PATM_ACTION_LOG_IRET
+ lock or dword [ss:PATM_PENDINGACTION], eax
+ mov ecx, PATM_ACTION_MAGIC
+ db 0fh, 0bh ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+ pop edx
+ pop ecx
+ pop eax
+%endif
+
+ test dword [esp], X86_EFL_NT
+ jnz near iretring1_fault1
+
+ ; we can't do an iret to v86 code, as we run with CPL=1. The iret would attempt a protected mode iret and (most likely) fault.
+ test dword [esp+12], X86_EFL_VM
+ jnz near iretring1_return_to_v86
+
+ ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ;;@todo: not correct for iret back to ring 2!!!!!
+ ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ test dword [esp+8], 2
+ jnz iretring1_checkpendingirq
+
+ test dword [esp+12], X86_EFL_IF
+ jz near iretring1_clearIF
+
+iretring1_checkpendingirq:
+
+; if interrupts are pending, then we must go back to the host context to handle them!
+; Note: This is very important as pending pic interrupts can be overridden by apic interrupts if we don't check early enough (Fedora 5 boot)
+; @todo fix this properly, so we can dispatch pending interrupts in GC
+ test dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+ jz iretring1_continue
+
+; Go to our hypervisor trap handler to dispatch the pending irq
+ mov dword [ss:PATM_TEMP_EAX], eax
+ mov dword [ss:PATM_TEMP_ECX], ecx
+ mov dword [ss:PATM_TEMP_EDI], edi
+ mov dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX | PATM_RESTORE_EDI
+ mov eax, PATM_ACTION_PENDING_IRQ_AFTER_IRET
+ lock or dword [ss:PATM_PENDINGACTION], eax
+ mov ecx, PATM_ACTION_MAGIC
+ mov edi, PATM_CURINSTRADDR
+
+ popfd
+ db 0fh, 0bh ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+ ; does not return
+
+iretring1_continue:
+
+ test dword [esp+8], 2
+ jnz iretring1_notring01
+
+ test dword [esp+8], 1
+ jz iretring1_ring0
+
+ ; ring 1 return change CS & SS RPL to 2 from 1
+ and dword [esp+8], ~1 ; CS
+ or dword [esp+8], 2
+
+ and dword [esp+20], ~1 ; SS
+ or dword [esp+20], 2
+
+ jmp short iretring1_notring01
+iretring1_ring0:
+ ; force ring 1 CS RPL
+ or dword [esp+8], 1
+
+iretring1_notring01:
+ ; This section must *always* be executed (!!)
+ ; Extract the IOPL from the return flags, save them to our virtual flags and
+ ; put them back to zero
+ ; @note we assume iretd doesn't fault!!!
+ push eax
+ mov eax, dword [esp+16]
+ and eax, X86_EFL_IOPL
+ and dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+ or dword [ss:PATM_VMFLAGS], eax
+ pop eax
+ and dword [esp+12], ~X86_EFL_IOPL
+
+ ; Set IF again; below we make sure this won't cause problems.
+ or dword [ss:PATM_VMFLAGS], X86_EFL_IF
+
+ ; make sure iret is executed fully (including the iret below; cli ... iret can otherwise be interrupted)
+ mov dword [ss:PATM_INHIBITIRQADDR], PATM_CURINSTRADDR
+
+ popfd
+ mov dword [ss:PATM_INTERRUPTFLAG], 1
+ iretd
+ PATM_INT3
+
+iretring1_fault:
+ popfd
+ mov dword [ss:PATM_INTERRUPTFLAG], 1
+ PATM_INT3
+
+iretring1_fault1:
+ nop
+ popfd
+ mov dword [ss:PATM_INTERRUPTFLAG], 1
+ PATM_INT3
+
+iretring1_clearIF:
+ push dword [esp+4] ; eip to return to
+ pushfd
+ push eax
+ push PATM_FIXUP
+ DB 0E8h ; call
+ DD PATM_IRET_FUNCTION
+ add esp, 4 ; pushed address of jump table
+
+ cmp eax, 0
+ je near iretring1_fault3
+
+ mov dword [esp+12+4], eax ; stored eip in iret frame
+ pop eax
+ popfd
+ add esp, 4 ; pushed eip
+
+ ; This section must *always* be executed (!!)
+ ; Extract the IOPL from the return flags, save them to our virtual flags and
+ ; put them back to zero
+ push eax
+ mov eax, dword [esp+16]
+ and eax, X86_EFL_IOPL
+ and dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+ or dword [ss:PATM_VMFLAGS], eax
+ pop eax
+ and dword [esp+12], ~X86_EFL_IOPL
+
+ ; Clear IF
+ and dword [ss:PATM_VMFLAGS], ~X86_EFL_IF
+ popfd
+
+ test dword [esp+8], 1
+ jz iretring1_clearIF_ring0
+
+ ; ring 1 return change CS & SS RPL to 2 from 1
+ and dword [esp+8], ~1 ; CS
+ or dword [esp+8], 2
+
+ and dword [esp+20], ~1 ; SS
+ or dword [esp+20], 2
+ ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+ iretd
+
+iretring1_clearIF_ring0:
+ ; force ring 1 CS RPL
+ or dword [esp+8], 1
+ ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+ iretd
+
+iretring1_return_to_v86:
+ test dword [esp+12], X86_EFL_IF
+ jz iretring1_fault
+
+ ; Go to our hypervisor trap handler to perform the iret to v86 code
+ mov dword [ss:PATM_TEMP_EAX], eax
+ mov dword [ss:PATM_TEMP_ECX], ecx
+ mov dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX
+ mov eax, PATM_ACTION_DO_V86_IRET
+ lock or dword [ss:PATM_PENDINGACTION], eax
+ mov ecx, PATM_ACTION_MAGIC
+
+ popfd
+
+ db 0fh, 0bh ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+ ; does not return
+
+
+iretring1_fault3:
+ pop eax
+ popfd
+ add esp, 4 ; pushed eip
+ jmp iretring1_fault
+
+align 4
+PATMIretRing1Table:
+ DW PATM_MAX_JUMPTABLE_ENTRIES ; nrSlots
+ DW 0 ; ulInsertPos
+ DD 0 ; cAddresses
+ TIMES PATCHJUMPTABLE_SIZE DB 0 ; lookup slots
+
+PATMIretRing1End:
+ENDPROC PATMIretRing1Replacement
+
+SECTION .data
+; Patch record for 'iretd'
+GLOBALNAME PATMIretRing1Record
+ RTCCPTR_DEF PATMIretRing1Start
+ DD 0
+ DD 0
+ DD 0
+ DD PATMIretRing1End- PATMIretRing1Start
+%ifdef PATM_LOG_PATCHIRET
+ DD 26
+%else
+ DD 25
+%endif
+ DD PATM_INTERRUPTFLAG
+ DD 0
+%ifdef PATM_LOG_PATCHIRET
+ DD PATM_PENDINGACTION
+ DD 0
+%endif
+ DD PATM_VM_FORCEDACTIONS
+ DD 0
+ DD PATM_TEMP_EAX
+ DD 0
+ DD PATM_TEMP_ECX
+ DD 0
+ DD PATM_TEMP_EDI
+ DD 0
+ DD PATM_TEMP_RESTORE_FLAGS
+ DD 0
+ DD PATM_PENDINGACTION
+ DD 0
+ DD PATM_CURINSTRADDR
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_INHIBITIRQADDR
+ DD 0
+ DD PATM_CURINSTRADDR
+ DD 0
+ DD PATM_INTERRUPTFLAG
+ DD 0
+ DD PATM_INTERRUPTFLAG
+ DD 0
+ DD PATM_INTERRUPTFLAG
+ DD 0
+ DD PATM_FIXUP
+ DD PATMIretRing1Table - PATMIretRing1Start
+ DD PATM_IRET_FUNCTION
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_VMFLAGS
+ DD 0
+ DD PATM_TEMP_EAX
+ DD 0
+ DD PATM_TEMP_ECX
+ DD 0
+ DD PATM_TEMP_RESTORE_FLAGS
+ DD 0
+ DD PATM_PENDINGACTION
+ DD 0
+ DD 0ffffffffh
+SECTION .text
+
;
; global function for implementing 'iret' to code with IF cleared
diff --git a/src/VBox/VMM/VMMR3/PATMA.mac b/src/VBox/VMM/VMMR3/PATMA.mac
index d6e0d3c6..7dd31852 100644
--- a/src/VBox/VMM/VMMR3/PATMA.mac
+++ b/src/VBox/VMM/VMMR3/PATMA.mac
@@ -4,7 +4,7 @@
;
;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2010 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/PATMGuest.cpp b/src/VBox/VMM/VMMR3/PATMGuest.cpp
index 932853fe..efdd4ea5 100644
--- a/src/VBox/VMM/VMMR3/PATMGuest.cpp
+++ b/src/VBox/VMM/VMMR3/PATMGuest.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -194,7 +194,7 @@ int PATMPatchOpenBSDHandlerPrefix(PVM pVM, PDISCPUSTATE pCpu, RTGCPTR32 pInstrGC
}
/* Found it; patch the push cs */
pPatchRec->patch.flags &= ~(PATMFL_GUEST_SPECIFIC); /* prevent a breakpoint from being triggered */
- return PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, pCpu, &pPatchRec->patch);
+ return patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, pCpu, &pPatchRec->patch);
}
/**
@@ -209,7 +209,7 @@ int PATMPatchOpenBSDHandlerPrefix(PVM pVM, PDISCPUSTATE pCpu, RTGCPTR32 pInstrGC
* @param pPatchRec Patch structure
*
*/
-int PATMInstallGuestSpecificPatch(PVM pVM, PDISCPUSTATE pCpu, RTGCPTR32 pInstrGC, uint8_t *pInstrHC, PPATMPATCHREC pPatchRec)
+int patmR3InstallGuestSpecificPatch(PVM pVM, PDISCPUSTATE pCpu, RTGCPTR32 pInstrGC, uint8_t *pInstrHC, PPATMPATCHREC pPatchRec)
{
int rc;
diff --git a/src/VBox/VMM/VMMR3/PATMPatch.cpp b/src/VBox/VMM/VMMR3/PATMPatch.cpp
index db64a0b4..33f4673a 100644
--- a/src/VBox/VMM/VMMR3/PATMPatch.cpp
+++ b/src/VBox/VMM/VMMR3/PATMPatch.cpp
@@ -6,7 +6,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,28 +22,25 @@
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
-#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
-#include <VBox/param.h>
-#include <iprt/avl.h>
+#include <VBox/vmm/csam.h>
#include "PATMInternal.h"
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/csam.h>
+#include <VBox/param.h>
-#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
+#include <VBox/dis.h>
+#include <VBox/disopcode.h>
+
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
-#include <VBox/dis.h>
-#include <VBox/disopcode.h>
-#include <stdlib.h>
-#include <stdio.h>
#include "PATMA.h"
#include "PATMPatch.h"
@@ -373,7 +370,7 @@ static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATC
/* Add lookup record for patch to guest address translation */
Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
- patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
*(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
@@ -435,10 +432,12 @@ int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSize
PATCHGEN_PROLOG(pVM, pPatch);
AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
-
callInfo.pCurInstrGC = pCurInstrGC;
- size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
+ if (EMIsRawRing1Enabled(pVM))
+ size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRing1Record, 0, false, &callInfo);
+ else
+ size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
PATCHGEN_EPILOG(pPatch, size);
return VINF_SUCCESS;
@@ -636,7 +635,7 @@ int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTarget
case OP_JMP:
/* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);
pPB[0] = 0xE9;
break;
@@ -865,7 +864,7 @@ int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uin
}
/* Jump back to the original instruction if IF is set again. */
- Assert(!PATMFindActivePatchByEntrypoint(pVM, pCurInstrGC));
+ Assert(!patmFindActivePatchByEntrypoint(pVM, pCurInstrGC));
rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
AssertRCReturn(rc, rc);
@@ -978,7 +977,7 @@ int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
/* Generate code to check for IF=1 before executing the call to the duplicated function. */
size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);
@@ -1001,7 +1000,7 @@ int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
PATCHGEN_EPILOG(pPatch, size);
@@ -1022,7 +1021,7 @@ int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
PATCHGEN_EPILOG(pPatch, size);
@@ -1048,7 +1047,7 @@ int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC
Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
callInfo.pNextInstrGC = pNextInstrGC;
@@ -1073,20 +1072,25 @@ int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC
*/
int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
{
- uint32_t size;
int rc = VINF_SUCCESS;
- PATCHGEN_PROLOG(pVM, pPatch);
+ if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't
+ deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as
+ TRPMForwardTrap takes care of the details. */
+ {
+ uint32_t size;
+ PATCHGEN_PROLOG(pVM, pPatch);
- /* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
+ /* Add lookup record for patch to guest address translation */
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
- /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
- size = patmPatchGenCode(pVM, pPatch, pPB,
- (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
- 0, false);
+ /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
+ size = patmPatchGenCode(pVM, pPatch, pPB,
+ (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
+ 0, false);
- PATCHGEN_EPILOG(pPatch, size);
+ PATCHGEN_EPILOG(pPatch, size);
+ }
// Interrupt gates set IF to 0
rc = patmPatchGenCli(pVM, pPatch);
@@ -1107,10 +1111,12 @@ int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
{
uint32_t size;
+ Assert(!EMIsRawRing1Enabled(pVM));
+
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
/* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
size = patmPatchGenCode(pVM, pPatch, pPB,
@@ -1129,7 +1135,7 @@ int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for stats code -> guest handler. */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
/* Generate code to keep calling statistics for this patch */
size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);
@@ -1544,7 +1550,7 @@ int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pRe
PATCHGEN_PROLOG(pVM, pPatch);
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
/* Generate code to jump to guest code if IF=1, else fault. */
size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
@@ -1567,7 +1573,7 @@ int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTR
if (fAddLookupRecord)
{
/* Add lookup record for patch to guest address translation */
- patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
+ patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
}
pPB[0] = 0xE9; //JMP
diff --git a/src/VBox/VMM/VMMR3/PATMPatch.h b/src/VBox/VMM/VMMR3/PATMPatch.h
index 8df1b79c..42cf0f2a 100644
--- a/src/VBox/VMM/VMMR3/PATMPatch.h
+++ b/src/VBox/VMM/VMMR3/PATMPatch.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/PATMR3Dbg.cpp b/src/VBox/VMM/VMMR3/PATMR3Dbg.cpp
new file mode 100644
index 00000000..96e39fc5
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/PATMR3Dbg.cpp
@@ -0,0 +1,404 @@
+/* $Id: PATMR3Dbg.cpp $ */
+/** @file
+ * PATM - Dynamic Guest OS Patching Manager, Debugger Related Parts.
+ */
+
+/*
+ * Copyright (C) 2006-2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_PATM
+#include <VBox/vmm/patm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/hm.h>
+#include "PATMInternal.h"
+#include "PATMA.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/dbg.h>
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+/** Adds a structure member to a debug (pseudo) module as a symbol. */
+#define ADD_MEMBER(a_hDbgMod, a_Struct, a_Member, a_pszName) \
+ do { \
+ rc = RTDbgModSymbolAdd(hDbgMod, a_pszName, 0 /*iSeg*/, RT_OFFSETOF(a_Struct, a_Member), \
+ RT_SIZEOFMEMB(a_Struct, a_Member), 0 /*fFlags*/, NULL /*piOrdinal*/); \
+ AssertRC(rc); \
+ } while (0)
+
+/** Adds a function (given by its RC address and size) to a debug (pseudo) module as a symbol. */
+#define ADD_FUNC(a_hDbgMod, a_BaseRCPtr, a_FuncRCPtr, a_cbFunc, a_pszName) \
+ do { \
+ int rcAddFunc = RTDbgModSymbolAdd(hDbgMod, a_pszName, 0 /*iSeg*/, \
+ (RTRCUINTPTR)a_FuncRCPtr - (RTRCUINTPTR)(a_BaseRCPtr), \
+ a_cbFunc, 0 /*fFlags*/, NULL /*piOrdinal*/); \
+ AssertRC(rcAddFunc); \
+ } while (0)
+
+
+
+/**
+ * Called by PATMR3Init.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void patmR3DbgInit(PVM pVM)
+{
+ pVM->patm.s.hDbgModPatchMem = NIL_RTDBGMOD;
+}
+
+
+/**
+ * Called by PATMR3Term.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void patmR3DbgTerm(PVM pVM)
+{
+ if (pVM->patm.s.hDbgModPatchMem != NIL_RTDBGMOD)
+ {
+ RTDbgModRelease(pVM->patm.s.hDbgModPatchMem);
+ pVM->patm.s.hDbgModPatchMem = NIL_RTDBGMOD;
+ }
+}
+
+
+/**
+ * Called when the patch memory is reinitialized.
+ *
+ * @param pVM The cross context VM structure.
+ */
+void patmR3DbgReset(PVM pVM)
+{
+ if (pVM->patm.s.hDbgModPatchMem != NIL_RTDBGMOD)
+ {
+ RTDbgModRemoveAll(pVM->patm.s.hDbgModPatchMem, true);
+ }
+}
+
+
+static size_t patmR3DbgDescribePatchAsSymbol(PPATMPATCHREC pPatchRec, char *pszName, size_t cbLeft)
+{
+ char * const pszNameStart = pszName;
+#define ADD_SZ(a_sz) \
+ do { \
+ if (cbLeft >= sizeof(a_sz)) \
+ { \
+ memcpy(pszName, a_sz, sizeof(a_sz)); \
+ pszName += sizeof(a_sz) - 1; \
+ cbLeft -= sizeof(a_sz) - 1;\
+ }\
+ } while (0)
+
+ /* Start the name off with the address of the guest code. */
+ size_t cch = RTStrPrintf(pszName, cbLeft, "Patch_%#08x", pPatchRec->patch.pPrivInstrGC);
+ cbLeft -= cch;
+ pszName += cch;
+
+ /* Append flags. */
+ uint64_t fFlags = pPatchRec->patch.flags;
+ if (fFlags & PATMFL_INTHANDLER)
+ ADD_SZ("_IntHandler");
+ if (fFlags & PATMFL_SYSENTER)
+ ADD_SZ("_SysEnter");
+ if (fFlags & PATMFL_GUEST_SPECIFIC)
+ ADD_SZ("_GuestSpecific");
+ if (fFlags & PATMFL_USER_MODE)
+ ADD_SZ("_UserMode");
+ if (fFlags & PATMFL_IDTHANDLER)
+ ADD_SZ("_IdtHnd");
+ if (fFlags & PATMFL_TRAPHANDLER)
+ ADD_SZ("_TrapHnd");
+ if (fFlags & PATMFL_DUPLICATE_FUNCTION)
+ ADD_SZ("_DupFunc");
+ if (fFlags & PATMFL_REPLACE_FUNCTION_CALL)
+ ADD_SZ("_ReplFunc");
+ if (fFlags & PATMFL_TRAPHANDLER_WITH_ERRORCODE)
+ ADD_SZ("_TrapHndErrCd");
+ if (fFlags & PATMFL_MMIO_ACCESS)
+ ADD_SZ("_MmioAccess");
+ if (fFlags & PATMFL_SYSENTER_XP)
+ ADD_SZ("_SysEnterXP");
+ if (fFlags & PATMFL_INT3_REPLACEMENT)
+ ADD_SZ("_Int3Repl");
+ if (fFlags & PATMFL_SUPPORT_CALLS)
+ ADD_SZ("_SupCalls");
+ if (fFlags & PATMFL_SUPPORT_INDIRECT_CALLS)
+ ADD_SZ("_SupIndirCalls");
+ if (fFlags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
+ ADD_SZ("_IdtHandlerWE");
+ if (fFlags & PATMFL_INHIBIT_IRQS)
+ ADD_SZ("_InhibitIrqs");
+ if (fFlags & PATMFL_RECOMPILE_NEXT)
+ ADD_SZ("_RecompileNext");
+ if (fFlags & PATMFL_CALLABLE_AS_FUNCTION)
+ ADD_SZ("_Callable");
+ if (fFlags & PATMFL_TRAMPOLINE)
+ ADD_SZ("_Trampoline");
+ if (fFlags & PATMFL_PATCHED_GUEST_CODE)
+ ADD_SZ("_PatchedGuestCode");
+ if (fFlags & PATMFL_MUST_INSTALL_PATCHJMP)
+ ADD_SZ("_MustInstallPatchJmp");
+ if (fFlags & PATMFL_INT3_REPLACEMENT_BLOCK)
+ ADD_SZ("_Int3ReplBlock");
+ if (fFlags & PATMFL_EXTERNAL_JUMP_INSIDE)
+ ADD_SZ("_ExtJmp");
+ if (fFlags & PATMFL_CODE_REFERENCED)
+ ADD_SZ("_CodeRefed");
+
+ return pszName - pszNameStart;
+}
+
+
+/**
+ * Called when a new patch is added or when first populating the address space.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pPatchRec The patch record.
+ */
+void patmR3DbgAddPatch(PVM pVM, PPATMPATCHREC pPatchRec)
+{
+ if ( pVM->patm.s.hDbgModPatchMem != NIL_RTDBGMOD
+ && pPatchRec->patch.pPatchBlockOffset > 0
+ && !(pPatchRec->patch.flags & PATMFL_GLOBAL_FUNCTIONS))
+ {
+ /** @todo find a cheap way of checking whether we've already added the patch.
+ * Using a flag would be nice, except I don't want to consider saved
+ * state considerations right now (I don't recall if we're still
+ * depending on structure layout there or not). */
+ char szName[256];
+ size_t off = patmR3DbgDescribePatchAsSymbol(pPatchRec, szName, sizeof(szName));
+
+ /* If we have a symbol near the guest address, append that. */
+ if (off + 8 <= sizeof(szName))
+ {
+ RTDBGSYMBOL Symbol;
+ RTGCINTPTR offDisp;
+ DBGFADDRESS Addr;
+
+ int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL,
+ DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pPatchRec->patch.pPrivInstrGC),
+ RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL,
+ &offDisp, &Symbol, NULL /*phMod*/);
+ if (RT_SUCCESS(rc))
+ {
+ szName[off++] = '_';
+ szName[off++] = '_';
+ RTStrCopy(&szName[off], sizeof(szName) - off, Symbol.szName);
+ }
+ }
+
+ /* Add it (may fail due to enable/disable patches). */
+ RTDbgModSymbolAdd(pVM->patm.s.hDbgModPatchMem, szName, 0 /*iSeg*/,
+ pPatchRec->patch.pPatchBlockOffset,
+ pPatchRec->patch.cbPatchBlockSize,
+ 0 /*fFlags*/, NULL /*piOrdinal*/);
+ }
+}
+
+
+/**
+ * Enumeration callback used by patmR3DbgAddPatches
+ *
+ * @returns 0 (continue enum)
+ * @param pNode The patch record node.
+ * @param pvUser The cross context VM structure.
+ */
+static DECLCALLBACK(int) patmR3DbgAddPatchCallback(PAVLOU32NODECORE pNode, void *pvUser)
+{
+ patmR3DbgAddPatch((PVM)pvUser, (PPATMPATCHREC)pNode);
+ return 0;
+}
+
+
+/**
+ * Populates an empty "patches" (hDbgModPatchMem) module with patch symbols.
+ *
+ * @param pVM The cross context VM structure.
+ * @param hDbgMod The debug module handle.
+ */
+static void patmR3DbgAddPatches(PVM pVM, RTDBGMOD hDbgMod)
+{
+ /*
+ * Global functions and a start marker.
+ */
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pfnHelperCallGC, PATMLookupAndCallRecord.size, "PATMLookupAndCall");
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pfnHelperRetGC, PATMRetFunctionRecord.size, "PATMRetFunction");
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pfnHelperJumpGC, PATMLookupAndJumpRecord.size, "PATMLookupAndJump");
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pfnHelperIretGC, PATMIretFunctionRecord.size, "PATMIretFunction");
+
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC, 0, "PatchMemStart");
+ ADD_FUNC(hDbgMod, pVM->patm.s.pPatchMemGC, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, "PATMStack");
+
+ /*
+ * The patches.
+ */
+ RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true /*fFromLeft*/, patmR3DbgAddPatchCallback, pVM);
+}
+
+
+/**
+ * Populate DBGF_AS_RC with PATM symbols.
+ *
+ * Called by dbgfR3AsLazyPopulate when DBGF_AS_RC or DBGF_AS_RC_AND_GC_GLOBAL is
+ * accessed for the first time.
+ *
+ * @param pVM The cross context VM structure.
+ * @param hDbgAs The DBGF_AS_RC address space handle.
+ */
+VMMR3_INT_DECL(void) PATMR3DbgPopulateAddrSpace(PVM pVM, RTDBGAS hDbgAs)
+{
+ AssertReturnVoid(!HMIsEnabled(pVM));
+
+ /*
+ * Add a fake debug module for the PATMGCSTATE structure.
+ */
+ RTDBGMOD hDbgMod;
+ int rc = RTDbgModCreate(&hDbgMod, "patmgcstate", sizeof(PATMGCSTATE), 0 /*fFlags*/);
+ if (RT_SUCCESS(rc))
+ {
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uVMFlags, "uVMFlags");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uPendingAction, "uPendingAction");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uPatchCalls, "uPatchCalls");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uScratch, "uScratch");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEFlags, "uIretEFlags");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretCS, "uIretCS");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEIP, "uIretEIP");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Psp, "Psp");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, fPIF, "fPIF");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, GCPtrInhibitInterrupts, "GCPtrInhibitInterrupts");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallPatchTargetAddr, "GCCallPatchTargetAddr");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallReturnAddr, "GCCallReturnAddr");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEAX, "Restore.uEAX");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uECX, "Restore.uECX");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEDI, "Restore.uEDI");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.eFlags, "Restore.eFlags");
+ ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uFlags, "Restore.uFlags");
+
+ rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pGCStateGC, 0 /*fFlags*/);
+ AssertLogRelRC(rc);
+ RTDbgModRelease(hDbgMod);
+ }
+
+ /*
+ * Add something for the stats so we get some kind of symbols for
+ * references to them while disassembling patches.
+ */
+ rc = RTDbgModCreate(&hDbgMod, "patmstats", PATM_STAT_MEMSIZE, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc))
+ {
+ ADD_FUNC(hDbgMod, pVM->patm.s.pStatsGC, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, "PATMMemStatsStart");
+
+ rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pStatsGC, 0 /*fFlags*/);
+ AssertLogRelRC(rc);
+ RTDbgModRelease(hDbgMod);
+ }
+
+ /*
+ * Add a fake debug module for the patches and stack.
+ */
+ rc = RTDbgModCreate(&hDbgMod, "patches", pVM->patm.s.cbPatchMem + PATM_STACK_TOTAL_SIZE + PAGE_SIZE, 0 /*fFlags*/);
+ if (RT_SUCCESS(rc))
+ {
+ pVM->patm.s.hDbgModPatchMem = hDbgMod;
+ patmR3DbgAddPatches(pVM, hDbgMod);
+
+ rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pPatchMemGC, 0 /*fFlags*/);
+ AssertLogRelRC(rc);
+ }
+}
+
+
+/**
+ * Annotates an instruction if patched.
+ *
+ * @param pVM The VM handle.
+ * @param RCPtr The instruction address.
+ * @param cbInstr The instruction length.
+ * @param pszBuf The output buffer. This will be an empty string
+ * if the instruction wasn't patched. If it's
+ * patched, it will hold a symbol-like string
+ * describing the patch.
+ * @param cbBuf The size of the output buffer.
+ */
+VMMR3_INT_DECL(void) PATMR3DbgAnnotatePatchedInstruction(PVM pVM, RTRCPTR RCPtr, uint8_t cbInstr, char *pszBuf, size_t cbBuf)
+{
+ /*
+ * Always zero the buffer.
+ */
+ AssertReturnVoid(cbBuf > 0);
+ *pszBuf = '\0';
+
+ /*
+ * Drop out immediately if it cannot be a patched instruction.
+ */
+ if (!PATMIsEnabled(pVM))
+ return;
+ if ( RCPtr < pVM->patm.s.pPatchedInstrGCLowest
+ || RCPtr > pVM->patm.s.pPatchedInstrGCHighest)
+ return;
+
+ /*
+ * Look for a patch record covering any part of the instruction.
+ *
+ * The first query results in a patched less or equal to RCPtr. While the
+ * second results in one that's greater than RCPtr.
+ */
+ PPATMPATCHREC pPatchRec;
+ pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, RCPtr, false /*fFromAbove*/);
+ if ( !pPatchRec
+ || RCPtr - pPatchRec->patch.pPrivInstrGC > pPatchRec->patch.cbPrivInstr)
+ {
+ pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, RCPtr, true /*fFromAbove*/);
+ if ( !pPatchRec
+ || (RTRCPTR)(RCPtr + cbInstr) < pPatchRec->patch.pPrivInstrGC )
+ return;
+ }
+
+ /*
+ * Lazy bird uses the symbol name generation code for describing the patch.
+ */
+ size_t off = patmR3DbgDescribePatchAsSymbol(pPatchRec, pszBuf, cbBuf);
+ if (off + 1 < cbBuf)
+ {
+ const char *pszState;
+ switch (pPatchRec->patch.uState)
+ {
+ case PATCH_REFUSED: pszState = "Refused"; break;
+ case PATCH_DISABLED: pszState = "Disabled"; break;
+ case PATCH_ENABLED: pszState = "Enabled"; break;
+ case PATCH_UNUSABLE: pszState = "Unusable"; break;
+ case PATCH_DIRTY: pszState = "Dirty"; break;
+ case PATCH_DISABLE_PENDING: pszState = "DisablePending"; break;
+ default: pszState = "State???"; AssertFailed(); break;
+ }
+
+ if (pPatchRec->patch.cbPatchBlockSize > 0)
+ off += RTStrPrintf(&pszBuf[off], cbBuf - off, " - %s (%u b) - %#x LB %#x",
+ pszState, pPatchRec->patch.cbPatchJump,
+ pPatchRec->patch.pPatchBlockOffset + pVM->patm.s.pPatchMemGC,
+ pPatchRec->patch.cbPatchBlockSize);
+ else
+ off += RTStrPrintf(&pszBuf[off], cbBuf - off, " - %s (%u b)", pszState, pPatchRec->patch.cbPatchJump);
+ }
+
+}
+
diff --git a/src/VBox/VMM/VMMR3/PATMSSM.cpp b/src/VBox/VMM/VMMR3/PATMSSM.cpp
index 2692a837..ea44713c 100644
--- a/src/VBox/VMM/VMMR3/PATMSSM.cpp
+++ b/src/VBox/VMM/VMMR3/PATMSSM.cpp
@@ -6,7 +6,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -42,6 +42,7 @@
#include <iprt/string.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
+#include <VBox/version.h>
/**
* Patch information - SSM version.
@@ -136,11 +137,6 @@ typedef struct PATMPATCHRECSSM
PATCHINFOSSM patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
-/*******************************************************************************
-* Defined Constants And Macros *
-*******************************************************************************/
-#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
-#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
/*******************************************************************************
* Internal Functions *
@@ -248,6 +244,105 @@ static SSMFIELD const g_aPatmFields[] =
};
/**
+ * SSM descriptor table for the PATM structure starting with r86139.
+ */
+static SSMFIELD const g_aPatmFields86139[] =
+{
+ /** @todo there are a bunch more fields here which can be marked as ignored. */
+ SSMFIELD_ENTRY_IGNORE( PATM, offVM),
+ SSMFIELD_ENTRY_RCPTR( PATM, pPatchMemGC),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, pPatchMemHC),
+ SSMFIELD_ENTRY( PATM, cbPatchMem),
+ SSMFIELD_ENTRY( PATM, offPatchMem),
+ SSMFIELD_ENTRY( PATM, fOutOfMemory),
+ SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 3),
+ SSMFIELD_ENTRY( PATM, deltaReloc),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGCStateHC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pGCStateGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pGCStackGC),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGCStackHC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pCPUMCtxGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pStatsGC),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, pStatsHC),
+ SSMFIELD_ENTRY( PATM, uCurrentPatchIdx),
+ SSMFIELD_ENTRY( PATM, ulCallDepth),
+ SSMFIELD_ENTRY( PATM, cPageRecords),
+ SSMFIELD_ENTRY_RCPTR( PATM, pPatchedInstrGCLowest),
+ SSMFIELD_ENTRY_RCPTR( PATM, pPatchedInstrGCHighest),
+ SSMFIELD_ENTRY_RCPTR( PATM, PatchLookupTreeGC),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, PatchLookupTreeHC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperCallGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperRetGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperJumpGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperIretGC),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGlobalPatchRec),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnSysEnterGC),
+ SSMFIELD_ENTRY_RCPTR( PATM, pfnSysEnterPatchGC),
+ SSMFIELD_ENTRY( PATM, uSysEnterPatchIdx),
+ SSMFIELD_ENTRY_RCPTR( PATM, pvFaultMonitor),
+ SSMFIELD_ENTRY_GCPHYS( PATM, mmio.GCPhys),
+ SSMFIELD_ENTRY_RCPTR( PATM, mmio.pCachedData),
+ SSMFIELD_ENTRY_IGN_RCPTR( PATM, mmio.Alignment0),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, savedstate.pSSM),
+ SSMFIELD_ENTRY( PATM, savedstate.cPatches),
+ SSMFIELD_ENTRY_PAD_HC64( PATM, savedstate.Alignment0, sizeof(uint32_t)),
+ SSMFIELD_ENTRY_IGN_HCPTR( PATM, hDbgModPatchMem),
+ SSMFIELD_ENTRY_PAD_HC32( PATM, Alignment0, sizeof(uint32_t)),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatNrOpcodeRead),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatDisabled),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatUnusable),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatEnabled),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstalled),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledFunctionPatches),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledTrampoline),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledJump),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInt3Callable),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInt3BlockRun),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatOverwritten),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFixedConflicts),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFlushed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPageBoundaryCrossed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatMonitored),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatHandleTrap),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatSwitchBack),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatSwitchBackFail),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPATMMemoryUsed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateREQSuccess),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateREQFailed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateUseExisting),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionFound),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionNotFound),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWrite),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteDetect),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatDirty),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPushTrap),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteInterpreted),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteInterpretedFailed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatSysEnter),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatSysExit),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatEmulIret),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatEmulIretFailed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirty),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirtyGood),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirtyBad),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchPageInserted),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchPageRemoved),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchRefreshSuccess),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatPatchRefreshFailed),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatGenRet),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatGenRetReused),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatGenJump),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatGenCall),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatGenPopf),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatCheckPendingIRQ),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionLookupReplace),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionLookupInsert),
+ SSMFIELD_ENTRY_IGNORE( PATM, StatU32FunctionMaxSlotsUsed),
+ SSMFIELD_ENTRY_IGNORE( PATM, Alignment0),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/**
* SSM descriptor table for the PATMGCSTATE structure.
*/
static SSMFIELD const g_aPatmGCStateFields[] =
@@ -432,7 +527,7 @@ static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *p
PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
/* Save the lookup record. */
- int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
+ int rc = SSMR3PutStructEx(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST), 0 /*fFlags*/, &g_aPatmRecPatchToGuest[0], NULL);
AssertRCReturn(rc, rc);
return VINF_SUCCESS;
@@ -454,9 +549,16 @@ static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
RELOCREC rec = *(PRELOCREC)pNode;
RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;
+ /* Convert pointer to an offset into patch memory. May not be applicable
+ to all fixup types, thus the UINT32_MAX. */
Assert(rec.pRelocPos);
- /* Convert pointer to an offset into patch memory. */
- PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
+ uintptr_t offRelocPos = (uintptr_t)rec.pRelocPos - (uintptr_t)pVM->patm.s.pPatchMemHC;
+ if (offRelocPos > pVM->patm.s.cbPatchMem)
+ offRelocPos = UINT32_MAX;
+ rec.pRelocPos = (uint8_t *)offRelocPos;
+
+ /* Zero rec.Core.Key since it's unused and may trigger SSM check due to the hack below. */
+ rec.Core.Key = 0;
if (rec.uType == FIXUP_ABSOLUTE)
{
@@ -478,7 +580,7 @@ static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
}
/* Save the lookup record. */
- int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
+ int rc = SSMR3PutStructEx(pSSM, &rec, sizeof(rec), 0 /*fFlags*/, &g_aPatmRelocRec[0], NULL);
AssertRCReturn(rc, rc);
return VINF_SUCCESS;
@@ -594,6 +696,9 @@ static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
patmR3PatchConvertMem2SSM(&patch, pPatch);
+ Log4(("patmSavePatchState: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
+ patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
+ patch.patch.nrFixups, patch.patch.nrJumpRecs));
/*
* Reset HC pointers that need to be recalculated when loading the state
@@ -605,7 +710,7 @@ static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);
/* Save the patch record itself */
- rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
+ rc = SSMR3PutStructEx(pSSM, &patch, sizeof(patch), 0 /*fFlags*/, &g_aPatmPatchRecFields[0], NULL);
AssertRCReturn(rc, rc);
/*
@@ -616,7 +721,8 @@ static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
- RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
+ rc = RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
+ AssertRCReturn(rc, rc);
#ifdef VBOX_STRICT
uint32_t nrLookupRecords = 0;
@@ -624,7 +730,9 @@ static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif
- RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
+ rc = RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
+ AssertRCReturn(rc, rc);
+
return VINF_SUCCESS;
}
@@ -660,7 +768,7 @@ DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
/*
* Save PATM structure
*/
- rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
+ rc = SSMR3PutStructEx(pSSM, &patmInfo, sizeof(patmInfo), 0 /*fFlags*/, &g_aPatmFields[0], NULL);
AssertRCReturn(rc, rc);
/*
@@ -672,12 +780,13 @@ DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
/*
* Save GC state memory
*/
- rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
+ rc = SSMR3PutStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), 0 /*fFlags*/, &g_aPatmGCStateFields[0], NULL);
AssertRCReturn(rc, rc);
/*
* Save PATM stack page
*/
+ SSMR3PutU32(pSSM, PATM_STACK_TOTAL_SIZE);
rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
AssertRCReturn(rc, rc);
@@ -692,6 +801,7 @@ DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
return VINF_SUCCESS;
}
+
/**
* Execute state load operation.
*
@@ -707,6 +817,7 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
int rc;
if ( uVersion != PATM_SSM_VERSION
+ && uVersion != PATM_SSM_VERSION_MEM
&& uVersion != PATM_SSM_VERSION_FIXUP_HACK
&& uVersion != PATM_SSM_VERSION_VER16
)
@@ -714,6 +825,7 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
}
+ uint32_t const fStructRestoreFlags = uVersion <= PATM_SSM_VERSION_MEM ? SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED : 0;
Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
pVM->patm.s.savedstate.pSSM = pSSM;
@@ -722,25 +834,27 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
* Restore PATM structure
*/
RT_ZERO(patmInfo);
- rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
+ if ( uVersion == PATM_SSM_VERSION_MEM
+ && SSMR3HandleRevision(pSSM) >= 86139
+ && SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 2, 51))
+ rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
+ &g_aPatmFields86139[0], NULL);
+ else
+ rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), fStructRestoreFlags, &g_aPatmFields[0], NULL);
AssertRCReturn(rc, rc);
/* Relative calls are made to the helper functions. Therefor their relative location must not change! */
/* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
- if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
- || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
- || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
- || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
- {
- AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
- return VERR_SSM_INVALID_STATE;
- }
+ AssertLogRelReturn((pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelReturn((pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelReturn((pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelReturn((pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ AssertLogRelReturn(pVM->patm.s.cbPatchMem == patmInfo.cbPatchMem, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
- if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
- {
- AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
- return VERR_SSM_INVALID_STATE;
- }
pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
@@ -776,14 +890,26 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
* Restore GC state memory
*/
RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
- rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
+ rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), fStructRestoreFlags, &g_aPatmGCStateFields[0], NULL);
AssertRCReturn(rc, rc);
/*
* Restore PATM stack page
*/
- rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
+ uint32_t cbStack = PATM_STACK_TOTAL_SIZE;
+ if (uVersion > PATM_SSM_VERSION_MEM)
+ {
+ rc = SSMR3GetU32(pSSM, &cbStack);
+ AssertRCReturn(rc, rc);
+ }
+ AssertCompile(!(PATM_STACK_TOTAL_SIZE & 31));
+ AssertLogRelMsgReturn(cbStack > 0 && cbStack <= PATM_STACK_TOTAL_SIZE && !(cbStack & 31),
+ ("cbStack=%#x vs %#x", cbStack, PATM_STACK_TOTAL_SIZE),
+ VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+ rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, cbStack);
AssertRCReturn(rc, rc);
+ if (cbStack < PATM_STACK_TOTAL_SIZE)
+ memset((uint8_t *)pVM->patm.s.pGCStackHC + cbStack, 0, PATM_STACK_TOTAL_SIZE - cbStack);
/*
* Load all patches
@@ -794,8 +920,11 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
PATMPATCHREC *pPatchRec;
RT_ZERO(patch);
- rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
+ rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), fStructRestoreFlags, &g_aPatmPatchRecFields[0], NULL);
AssertRCReturn(rc, rc);
+ Log4(("patmR3Load: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
+ patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
+ patch.patch.nrFixups, patch.patch.nrJumpRecs));
Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
@@ -809,7 +938,7 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
/* Convert SSM version to memory. */
patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
- Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
+ Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
Assert(ret);
if (pPatchRec->patch.uState != PATCH_REFUSED)
@@ -828,7 +957,7 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
RT_ZERO(cacheRec);
cacheRec.pPatch = &pPatchRec->patch;
- uint8_t *pPrivInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
+ uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
/* Can fail due to page or page table not present. */
/*
@@ -843,7 +972,7 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
RTRCPTR *pFixup;
RT_ZERO(rec);
- rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
+ rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRelocRec[0], NULL);
AssertRCReturn(rc, rc);
if (pPrivInstrHC)
@@ -851,7 +980,10 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
/* rec.pRelocPos now contains the relative position inside the hypervisor area. */
offset = (int32_t)(intptr_t)rec.pRelocPos;
/* Convert to HC pointer again. */
- PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
+ if ((uintptr_t)rec.pRelocPos < pVM->patm.s.cbPatchMem)
+ rec.pRelocPos = pVM->patm.s.pPatchMemHC + (uintptr_t)rec.pRelocPos;
+ else
+ rec.pRelocPos = NULL;
pFixup = (RTRCPTR *)rec.pRelocPos;
if (pPatchRec->patch.uState != PATCH_REFUSED)
@@ -891,10 +1023,10 @@ DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32
for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
{
RT_ZERO(rec);
- rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
+ rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRecPatchToGuest[0], NULL);
AssertRCReturn(rc, rc);
- patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
+ patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
}
Assert(pPatchRec->patch.Patch2GuestAddrTree);
}
@@ -1169,7 +1301,7 @@ static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPA
cCpuidFixup++;
}
else
- if (ulSSMVersion >= PATM_SSM_VERSION)
+ if (ulSSMVersion >= PATM_SSM_VERSION_MEM)
{
#ifdef LOG_ENABLED
RTRCPTR oldFixup = *pFixup;
diff --git a/src/VBox/VMM/VMMR3/PDM.cpp b/src/VBox/VMM/VMMR3/PDM.cpp
index 07296ccc..652875c4 100644
--- a/src/VBox/VMM/VMMR3/PDM.cpp
+++ b/src/VBox/VMM/VMMR3/PDM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -255,6 +255,7 @@
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/vmm.h>
@@ -336,12 +337,13 @@ static FNDBGFHANDLERINT pdmR3InfoTracingIds;
* @returns VBox status code.
* @param pUVM Pointer to the user mode VM structure.
*/
-VMMR3DECL(int) PDMR3InitUVM(PUVM pUVM)
+VMMR3_INT_DECL(int) PDMR3InitUVM(PUVM pUVM)
{
AssertCompile(sizeof(pUVM->pdm.s) <= sizeof(pUVM->pdm.padding));
AssertRelease(sizeof(pUVM->pdm.s) <= sizeof(pUVM->pdm.padding));
pUVM->pdm.s.pModules = NULL;
pUVM->pdm.s.pCritSects = NULL;
+ pUVM->pdm.s.pRwCritSects = NULL;
return RTCritSectInit(&pUVM->pdm.s.ListCritSect);
}
@@ -352,7 +354,7 @@ VMMR3DECL(int) PDMR3InitUVM(PUVM pUVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PDMR3Init(PVM pVM)
+VMMR3_INT_DECL(int) PDMR3Init(PVM pVM)
{
LogFlow(("PDMR3Init\n"));
@@ -373,7 +375,7 @@ VMMR3DECL(int) PDMR3Init(PVM pVM)
/*
* Initialize critical sections first.
*/
- int rc = pdmR3CritSectInitStats(pVM);
+ int rc = pdmR3CritSectBothInitStats(pVM);
if (RT_SUCCESS(rc))
rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, RT_SRC_POS, "PDM");
if (RT_SUCCESS(rc))
@@ -444,7 +446,7 @@ VMMR3DECL(int) PDMR3Init(PVM pVM)
* @remark The loader subcomponent is relocated by PDMR3LdrRelocate() very
* early in the relocation phase.
*/
-VMMR3DECL(void) PDMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+VMMR3_INT_DECL(void) PDMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
LogFlow(("PDMR3Relocate\n"));
@@ -457,7 +459,7 @@ VMMR3DECL(void) PDMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
/*
* Critical sections.
*/
- pdmR3CritSectRelocate(pVM);
+ pdmR3CritSectBothRelocate(pVM);
/*
* The registered PIC.
@@ -513,13 +515,20 @@ VMMR3DECL(void) PDMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
/*
* Devices & Drivers.
*/
- PCPDMDEVHLPRC pDevHlpRC;
- int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pDevHlpRC);
- AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDevHlp\n", rc));
+ int rc;
+ PCPDMDEVHLPRC pDevHlpRC = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pDevHlpRC);
+ AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDevHlp\n", rc));
+ }
- PCPDMDRVHLPRC pDrvHlpRC;
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pDrvHlpRC);
- AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDevHlp\n", rc));
+ PCPDMDRVHLPRC pDrvHlpRC = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pDrvHlpRC);
+ AssertReleaseMsgRC(rc, ("rc=%Rrc when resolving g_pdmRCDevHlp\n", rc));
+ }
for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
{
@@ -616,7 +625,7 @@ static void pdmR3TermLuns(PVM pVM, PPDMLUN pLun, const char *pszDevice, unsigned
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PDMR3Term(PVM pVM)
+VMMR3_INT_DECL(int) PDMR3Term(PVM pVM)
{
LogFlow(("PDMR3Term:\n"));
AssertMsg(PDMCritSectIsInitialized(&pVM->pdm.s.CritSect), ("bad init order!\n"));
@@ -658,11 +667,15 @@ VMMR3DECL(int) PDMR3Term(PVM pVM)
}
TMR3TimerDestroyDevice(pVM, pDevIns);
- //SSMR3DeregisterDriver(pVM, pDevIns, NULL, 0);
- pdmR3CritSectDeleteDevice(pVM, pDevIns);
- //pdmR3ThreadDestroyDevice(pVM, pDevIns);
- //PDMR3QueueDestroyDevice(pVM, pDevIns);
+ SSMR3DeregisterDevice(pVM, pDevIns, NULL, 0);
+ pdmR3CritSectBothDeleteDevice(pVM, pDevIns);
+ pdmR3ThreadDestroyDevice(pVM, pDevIns);
+ PDMR3QueueDestroyDevice(pVM, pDevIns);
PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ pdmR3AsyncCompletionTemplateDestroyDevice(pVM, pDevIns);
+#endif
+ DBGFR3InfoDeregisterDevice(pVM, pDevIns, NULL);
}
/*
@@ -697,7 +710,7 @@ VMMR3DECL(int) PDMR3Term(PVM pVM)
* Destroy the PDM lock.
*/
PDMR3CritSectDelete(&pVM->pdm.s.CritSect);
- /* The MiscCritSect is deleted by PDMR3CritSectTerm. */
+ /* The MiscCritSect is deleted by PDMR3CritSectBothTerm later. */
LogFlow(("PDMR3Term: returns %Rrc\n", VINF_SUCCESS));
return VINF_SUCCESS;
@@ -711,7 +724,7 @@ VMMR3DECL(int) PDMR3Term(PVM pVM)
*
* @param pUVM Pointer to the user mode VM structure.
*/
-VMMR3DECL(void) PDMR3TermUVM(PUVM pUVM)
+VMMR3_INT_DECL(void) PDMR3TermUVM(PUVM pUVM)
{
/*
* In the normal cause of events we will now call pdmR3LdrTermU for
@@ -721,6 +734,7 @@ VMMR3DECL(void) PDMR3TermUVM(PUVM pUVM)
pdmR3LdrTermU(pUVM);
Assert(pUVM->pdm.s.pCritSects == NULL);
+ Assert(pUVM->pdm.s.pRwCritSects == NULL);
RTCritSectDelete(&pUVM->pdm.s.ListCritSect);
}
@@ -782,12 +796,12 @@ static DECLCALLBACK(int) pdmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
{
PVMCPU pVCpu = &pVM->aCpus[idCpu];
- SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
- SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
- SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
- SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
+ SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
}
- SSMR3PutU32(pSSM, VM_FF_ISSET(pVM, VM_FF_PDM_DMA));
+ SSMR3PutU32(pSSM, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA));
pdmR3SaveBoth(pVM, pSSM);
return VINF_SUCCESS;
@@ -806,15 +820,15 @@ static DECLCALLBACK(int) pdmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
static DECLCALLBACK(int) pdmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
{
LogFlow(("pdmR3LoadPrep: %s%s\n",
- VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES) ? " VM_FF_PDM_QUEUES" : "",
- VM_FF_ISSET(pVM, VM_FF_PDM_DMA) ? " VM_FF_PDM_DMA" : ""));
+ VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES) ? " VM_FF_PDM_QUEUES" : "",
+ VM_FF_IS_SET(pVM, VM_FF_PDM_DMA) ? " VM_FF_PDM_DMA" : ""));
#ifdef LOG_ENABLED
for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
{
PVMCPU pVCpu = &pVM->aCpus[idCpu];
LogFlow(("pdmR3LoadPrep: VCPU %u %s%s\n", idCpu,
- VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
- VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : ""));
+ VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
+ VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : ""));
}
#endif
NOREF(pSSM);
@@ -823,7 +837,7 @@ static DECLCALLBACK(int) pdmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
* In case there is work pending that will raise an interrupt,
* start a DMA transfer, or release a lock. (unlikely)
*/
- if (VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))
+ if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
PDMR3QueueFlushAll(pVM);
/* Clear the FFs. */
@@ -885,7 +899,7 @@ static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersi
AssertMsgFailed(("fInterruptPending=%#x (APIC)\n", fInterruptPending));
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
}
- AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
+ AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
if (fInterruptPending)
VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
@@ -899,7 +913,7 @@ static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersi
AssertMsgFailed(("fInterruptPending=%#x (PIC)\n", fInterruptPending));
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
}
- AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
+ AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
if (fInterruptPending)
VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
@@ -915,7 +929,7 @@ static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersi
AssertMsgFailed(("fInterruptPending=%#x (NMI)\n", fInterruptPending));
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
}
- AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
+ AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
if (fInterruptPending)
VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
@@ -929,7 +943,7 @@ static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersi
AssertMsgFailed(("fInterruptPending=%#x (SMI)\n", fInterruptPending));
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
}
- AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
+ AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
if (fInterruptPending)
VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
}
@@ -947,7 +961,7 @@ static DECLCALLBACK(int) pdmR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersi
}
if (fDMAPending)
VM_FF_SET(pVM, VM_FF_PDM_DMA);
- Log(("pdmR3LoadExec: VM_FF_PDM_DMA=%RTbool\n", VM_FF_ISSET(pVM, VM_FF_PDM_DMA)));
+ Log(("pdmR3LoadExec: VM_FF_PDM_DMA=%RTbool\n", VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
}
/*
@@ -1404,7 +1418,7 @@ DECLINLINE(void) pdmR3ResetDev(PPDMDEVINS pDevIns, PPDMNOTIFYASYNCSTATS pAsync)
*
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(void) PDMR3ResetCpu(PVMCPU pVCpu)
+VMMR3_INT_DECL(void) PDMR3ResetCpu(PVMCPU pVCpu)
{
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
@@ -1419,7 +1433,7 @@ VMMR3DECL(void) PDMR3ResetCpu(PVMCPU pVCpu)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) PDMR3Reset(PVM pVM)
+VMMR3_INT_DECL(void) PDMR3Reset(PVM pVM)
{
LogFlow(("PDMR3Reset:\n"));
@@ -1506,6 +1520,34 @@ VMMR3DECL(void) PDMR3Reset(PVM pVM)
/**
+ * This function will tell all the devices to setup up their memory structures
+ * after VM construction and after VM reset.
+ *
+ * @param pVM Pointer to the VM.
+ * @param fAtReset Indicates the context, after reset if @c true or after
+ * construction if @c false.
+ */
+VMMR3_INT_DECL(void) PDMR3MemSetup(PVM pVM, bool fAtReset)
+{
+ LogFlow(("PDMR3MemSetup: fAtReset=%RTbool\n", fAtReset));
+ PDMDEVMEMSETUPCTX const enmCtx = fAtReset ? PDMDEVMEMSETUPCTX_AFTER_RESET : PDMDEVMEMSETUPCTX_AFTER_CONSTRUCTION;
+
+ /*
+ * Iterate thru the device instances and work the callback.
+ */
+ for (PPDMDEVINS pDevIns = pVM->pdm.s.pDevInstances; pDevIns; pDevIns = pDevIns->Internal.s.pNextR3)
+ if (pDevIns->pReg->pfnMemSetup)
+ {
+ PDMCritSectEnter(pDevIns->pCritSectRoR3, VERR_IGNORED);
+ pDevIns->pReg->pfnMemSetup(pDevIns, enmCtx);
+ PDMCritSectLeave(pDevIns->pCritSectRoR3);
+ }
+
+ LogFlow(("PDMR3MemSetup: returns void\n"));
+}
+
+
+/**
* Worker for PDMR3Suspend that deals with one driver.
*
* @param pDrvIns The driver instance.
@@ -1653,7 +1695,7 @@ DECLINLINE(void) pdmR3SuspendDev(PPDMDEVINS pDevIns, PPDMNOTIFYASYNCSTATS pAsync
* @param pVM Pointer to the VM.
* @thread EMT(0)
*/
-VMMR3DECL(void) PDMR3Suspend(PVM pVM)
+VMMR3_INT_DECL(void) PDMR3Suspend(PVM pVM)
{
LogFlow(("PDMR3Suspend:\n"));
VM_ASSERT_EMT0(pVM);
@@ -1814,7 +1856,7 @@ DECLINLINE(int) pdmR3ResumeDev(PPDMDEVINS pDevIns)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) PDMR3Resume(PVM pVM)
+VMMR3_INT_DECL(void) PDMR3Resume(PVM pVM)
{
LogFlow(("PDMR3Resume:\n"));
@@ -2093,22 +2135,24 @@ VMMR3DECL(void) PDMR3PowerOff(PVM pVM)
* and use them to talk to the device.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param ppBase Where to store the pointer to the base device interface on success.
* @remark We're not doing any locking ATM, so don't try call this at times when the
* device chain is known to be updated.
*/
-VMMR3DECL(int) PDMR3QueryDevice(PVM pVM, const char *pszDevice, unsigned iInstance, PPDMIBASE *ppBase)
+VMMR3DECL(int) PDMR3QueryDevice(PUVM pUVM, const char *pszDevice, unsigned iInstance, PPDMIBASE *ppBase)
{
LogFlow(("PDMR3DeviceQuery: pszDevice=%p:{%s} iInstance=%u ppBase=%p\n", pszDevice, pszDevice, iInstance, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
/*
* Iterate registered devices looking for the device.
*/
size_t cchDevice = strlen(pszDevice);
- for (PPDMDEV pDev = pVM->pdm.s.pDevs; pDev; pDev = pDev->pNext)
+ for (PPDMDEV pDev = pUVM->pVM->pdm.s.pDevs; pDev; pDev = pDev->pNext)
{
if ( pDev->cchName == cchDevice
&& !memcmp(pDev->pReg->szName, pszDevice, cchDevice))
@@ -2149,7 +2193,7 @@ VMMR3DECL(int) PDMR3QueryDevice(PVM pVM, const char *pszDevice, unsigned iInstan
* device and not the top level driver.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
@@ -2157,16 +2201,18 @@ VMMR3DECL(int) PDMR3QueryDevice(PVM pVM, const char *pszDevice, unsigned iInstan
* @remark We're not doing any locking ATM, so don't try call this at times when the
* device chain is known to be updated.
*/
-VMMR3DECL(int) PDMR3QueryDeviceLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+VMMR3DECL(int) PDMR3QueryDeviceLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
{
LogFlow(("PDMR3QueryLun: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
pszDevice, pszDevice, iInstance, iLun, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
/*
* Find the LUN.
*/
PPDMLUN pLun;
- int rc = pdmR3DevFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ int rc = pdmR3DevFindLun(pUVM->pVM, pszDevice, iInstance, iLun, &pLun);
if (RT_SUCCESS(rc))
{
*ppBase = pLun->pBase;
@@ -2182,7 +2228,7 @@ VMMR3DECL(int) PDMR3QueryDeviceLun(PVM pVM, const char *pszDevice, unsigned iIns
* Query the interface of the top level driver on a LUN.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
@@ -2190,10 +2236,12 @@ VMMR3DECL(int) PDMR3QueryDeviceLun(PVM pVM, const char *pszDevice, unsigned iIns
* @remark We're not doing any locking ATM, so don't try call this at times when the
* device chain is known to be updated.
*/
-VMMR3DECL(int) PDMR3QueryLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
+VMMR3DECL(int) PDMR3QueryLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, PPDMIBASE *ppBase)
{
LogFlow(("PDMR3QueryLun: pszDevice=%p:{%s} iInstance=%u iLun=%u ppBase=%p\n",
pszDevice, pszDevice, iInstance, iLun, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
/*
@@ -2223,7 +2271,7 @@ VMMR3DECL(int) PDMR3QueryLun(PVM pVM, const char *pszDevice, unsigned iInstance,
* is returned.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
@@ -2233,16 +2281,18 @@ VMMR3DECL(int) PDMR3QueryLun(PVM pVM, const char *pszDevice, unsigned iInstance,
* @remark We're not doing any locking ATM, so don't try call this at times when the
* device chain is known to be updated.
*/
-VMMR3DECL(int) PDMR3QueryDriverOnLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, const char *pszDriver, PPPDMIBASE ppBase)
+VMMR3DECL(int) PDMR3QueryDriverOnLun(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, const char *pszDriver, PPPDMIBASE ppBase)
{
LogFlow(("PDMR3QueryDriverOnLun: pszDevice=%p:{%s} iInstance=%u iLun=%u pszDriver=%p:{%s} ppBase=%p\n",
pszDevice, pszDevice, iInstance, iLun, pszDriver, pszDriver, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
/*
* Find the LUN.
*/
PPDMLUN pLun;
- int rc = pdmR3DevFindLun(pVM, pszDevice, iInstance, iLun, &pLun);
+ int rc = pdmR3DevFindLun(pUVM->pVM, pszDevice, iInstance, iLun, &pLun);
if (RT_SUCCESS(rc))
{
if (pLun->pTop)
@@ -2276,7 +2326,7 @@ VMMR3DECL(void) PDMR3DmaRun(PVM pVM)
if (VMMGetCpuId(pVM) != 0)
return;
- if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA))
+ if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_PDM_DMA))
{
if (pVM->pdm.s.pDmac)
{
@@ -2294,7 +2344,7 @@ VMMR3DECL(void) PDMR3DmaRun(PVM pVM)
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(int) PDMR3LockCall(PVM pVM)
+VMMR3_INT_DECL(int) PDMR3LockCall(PVM pVM)
{
return PDMR3CritSectEnterEx(&pVM->pdm.s.CritSect, true /* fHostCall */);
}
@@ -2309,11 +2359,11 @@ VMMR3DECL(int) PDMR3LockCall(PVM pVM)
* @param pvHeap Ring-3 pointer.
* @param cbSize Size of the heap.
*/
-VMMR3DECL(int) PDMR3RegisterVMMDevHeap(PVM pVM, RTGCPHYS GCPhys, RTR3PTR pvHeap, unsigned cbSize)
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapRegister(PVM pVM, RTGCPHYS GCPhys, RTR3PTR pvHeap, unsigned cbSize)
{
Assert(pVM->pdm.s.pvVMMDevHeap == NULL);
- Log(("PDMR3RegisterVMMDevHeap %RGp %RHv %x\n", GCPhys, pvHeap, cbSize));
+ Log(("PDMR3VmmDevHeapRegister %RGp %RHv %x\n", GCPhys, pvHeap, cbSize));
pVM->pdm.s.pvVMMDevHeap = pvHeap;
pVM->pdm.s.GCPhysVMMDevHeap = GCPhys;
pVM->pdm.s.cbVMMDevHeap = cbSize;
@@ -2329,11 +2379,11 @@ VMMR3DECL(int) PDMR3RegisterVMMDevHeap(PVM pVM, RTGCPHYS GCPhys, RTR3PTR pvHeap,
* @param pVM Pointer to the VM.
* @param GCPhys The physical address.
*/
-VMMR3DECL(int) PDMR3UnregisterVMMDevHeap(PVM pVM, RTGCPHYS GCPhys)
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapUnregister(PVM pVM, RTGCPHYS GCPhys)
{
Assert(pVM->pdm.s.GCPhysVMMDevHeap == GCPhys);
- Log(("PDMR3UnregisterVMMDevHeap %RGp\n", GCPhys));
+ Log(("PDMR3VmmDevHeapUnregister %RGp\n", GCPhys));
pVM->pdm.s.pvVMMDevHeap = NULL;
pVM->pdm.s.GCPhysVMMDevHeap = NIL_RTGCPHYS;
pVM->pdm.s.cbVMMDevHeap = 0;
@@ -2350,7 +2400,7 @@ VMMR3DECL(int) PDMR3UnregisterVMMDevHeap(PVM pVM, RTGCPHYS GCPhys)
* @param cbSize Allocation size.
* @param pv Ring-3 pointer. (out)
*/
-VMMR3DECL(int) PDMR3VMMDevHeapAlloc(PVM pVM, unsigned cbSize, RTR3PTR *ppv)
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapAlloc(PVM pVM, size_t cbSize, RTR3PTR *ppv)
{
#ifdef DEBUG_bird
if (!cbSize || cbSize > pVM->pdm.s.cbVMMDevHeapLeft)
@@ -2359,9 +2409,9 @@ VMMR3DECL(int) PDMR3VMMDevHeapAlloc(PVM pVM, unsigned cbSize, RTR3PTR *ppv)
AssertReturn(cbSize && cbSize <= pVM->pdm.s.cbVMMDevHeapLeft, VERR_NO_MEMORY);
#endif
- Log(("PDMR3VMMDevHeapAlloc %x\n", cbSize));
+ Log(("PDMR3VMMDevHeapAlloc: %#zx\n", cbSize));
- /** @todo not a real heap as there's currently only one user. */
+ /** @todo Not a real heap as there's currently only one user. */
*ppv = pVM->pdm.s.pvVMMDevHeap;
pVM->pdm.s.cbVMMDevHeapLeft = 0;
return VINF_SUCCESS;
@@ -2375,9 +2425,9 @@ VMMR3DECL(int) PDMR3VMMDevHeapAlloc(PVM pVM, unsigned cbSize, RTR3PTR *ppv)
* @param pVM Pointer to the VM.
* @param pv Ring-3 pointer.
*/
-VMMR3DECL(int) PDMR3VMMDevHeapFree(PVM pVM, RTR3PTR pv)
+VMMR3_INT_DECL(int) PDMR3VmmDevHeapFree(PVM pVM, RTR3PTR pv)
{
- Log(("PDMR3VMMDevHeapFree %RHv\n", pv));
+ Log(("PDMR3VmmDevHeapFree: %RHv\n", pv));
/** @todo not a real heap as there's currently only one user. */
pVM->pdm.s.cbVMMDevHeapLeft = pVM->pdm.s.cbVMMDevHeap;
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp
index caced743..ec978605 100644
--- a/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletion.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -194,6 +194,8 @@ static int pdmR3AsyncCompletionTemplateCreate(PVM pVM, PPPDMASYNCCOMPLETIONTEMPL
return VINF_SUCCESS;
}
+
+#ifdef SOME_UNUSED_FUNCTION
/**
* Creates a async completion template for a device instance.
*
@@ -206,7 +208,8 @@ static int pdmR3AsyncCompletionTemplateCreate(PVM pVM, PPPDMASYNCCOMPLETIONTEMPL
* @param pfnCompleted The completion callback routine.
* @param pszDesc Description.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEDEV pfnCompleted, const char *pszDesc)
+int pdmR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDevIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDEV pfnCompleted, const char *pszDesc)
{
LogFlow(("%s: pDevIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n",
__FUNCTION__, pDevIns, ppTemplate, pfnCompleted, pszDesc));
@@ -235,6 +238,8 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDev
return rc;
}
+#endif /* SOME_UNUSED_FUNCTION */
+
/**
* Creates a async completion template for a driver instance.
@@ -249,10 +254,11 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDevice(PVM pVM, PPDMDEVINS pDev
* @param pvTemplateUser Template user argument
* @param pszDesc Description.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser, const char *pszDesc)
+int pdmR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEDRV pfnCompleted, void *pvTemplateUser,
+ const char *pszDesc)
{
- LogFlow(("%s: pDrvIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n",
- __FUNCTION__, pDrvIns, ppTemplate, pfnCompleted, pszDesc));
+ LogFlow(("PDMR3AsyncCompletionTemplateCreateDriver: pDrvIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n", pDrvIns, ppTemplate, pfnCompleted, pszDesc));
/*
* Validate input.
@@ -279,6 +285,8 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrv
return rc;
}
+
+#ifdef SOME_UNUSED_FUNCTION
/**
* Creates a async completion template for a USB device instance.
*
@@ -291,10 +299,10 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateDriver(PVM pVM, PPDMDRVINS pDrv
* @param pfnCompleted The completion callback routine.
* @param pszDesc Description.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEUSB pfnCompleted, const char *pszDesc)
+int pdmR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate,
+ PFNPDMASYNCCOMPLETEUSB pfnCompleted, const char *pszDesc)
{
- LogFlow(("%s: pUsbIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n",
- __FUNCTION__, pUsbIns, ppTemplate, pfnCompleted, pszDesc));
+ LogFlow(("pdmR3AsyncCompletionTemplateCreateUsb: pUsbIns=%p ppTemplate=%p pfnCompleted=%p pszDesc=%s\n", pUsbIns, ppTemplate, pfnCompleted, pszDesc));
/*
* Validate input.
@@ -320,6 +328,8 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns
return rc;
}
+#endif
+
/**
* Creates a async completion template for internally by the VMM.
@@ -332,6 +342,7 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateUsb(PVM pVM, PPDMUSBINS pUsbIns
* @param pfnCompleted The completion callback routine.
* @param pvUser2 The 2nd user argument for the callback.
* @param pszDesc Description.
+ * @internal
*/
VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateInternal(PVM pVM, PPPDMASYNCCOMPLETIONTEMPLATE ppTemplate, PFNPDMASYNCCOMPLETEINT pfnCompleted, void *pvUser2, const char *pszDesc)
{
@@ -363,6 +374,7 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateCreateInternal(PVM pVM, PPPDMASYNCCOM
return rc;
}
+
/**
* Destroys the specified async completion template.
*
@@ -418,6 +430,7 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroy(PPDMASYNCCOMPLETIONTEMPLATE p
return VINF_SUCCESS;
}
+
/**
* Destroys all the specified async completion templates for the given device instance.
*
@@ -428,9 +441,9 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroy(PPDMASYNCCOMPLETIONTEMPLATE p
* @param pVM Pointer to the VM.
* @param pDevIns The device instance.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
+int pdmR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
{
- LogFlow(("%s: pDevIns=%p\n", __FUNCTION__, pDevIns));
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyDevice: pDevIns=%p\n", pDevIns));
/*
* Validate input.
@@ -467,6 +480,7 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDe
return VINF_SUCCESS;
}
+
/**
* Destroys all the specified async completion templates for the given driver instance.
*
@@ -477,9 +491,9 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDevice(PVM pVM, PPDMDEVINS pDe
* @param pVM Pointer to the VM.
* @param pDrvIns The driver instance.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
+int pdmR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
{
- LogFlow(("%s: pDevIns=%p\n", __FUNCTION__, pDrvIns));
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyDriver: pDevIns=%p\n", pDrvIns));
/*
* Validate input.
@@ -516,6 +530,7 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDr
return VINF_SUCCESS;
}
+
/**
* Destroys all the specified async completion templates for the given USB device instance.
*
@@ -526,9 +541,9 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyDriver(PVM pVM, PPDMDRVINS pDr
* @param pVM Pointer to the VM.
* @param pUsbIns The USB device instance.
*/
-VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns)
+int pdmR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIns)
{
- LogFlow(("%s: pUsbIns=%p\n", __FUNCTION__, pUsbIns));
+ LogFlow(("pdmR3AsyncCompletionTemplateDestroyUsb: pUsbIns=%p\n", pUsbIns));
/*
* Validate input.
@@ -566,17 +581,18 @@ VMMR3DECL(int) PDMR3AsyncCompletionTemplateDestroyUsb(PVM pVM, PPDMUSBINS pUsbIn
}
-static PPDMACBWMGR pdmacBwMgrFindById(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pcszId)
+/** Lazy coder. */
+static PPDMACBWMGR pdmacBwMgrFindById(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pszId)
{
PPDMACBWMGR pBwMgr = NULL;
- if (RT_VALID_PTR(pcszId))
+ if (pszId)
{
int rc = RTCritSectEnter(&pEpClass->CritSect); AssertRC(rc);
pBwMgr = pEpClass->pBwMgrsHead;
while ( pBwMgr
- && RTStrCmp(pBwMgr->pszId, pcszId))
+ && RTStrCmp(pBwMgr->pszId, pszId))
pBwMgr = pBwMgr->pNext;
rc = RTCritSectLeave(&pEpClass->CritSect); AssertRC(rc);
@@ -585,6 +601,8 @@ static PPDMACBWMGR pdmacBwMgrFindById(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const
return pBwMgr;
}
+
+/** Lazy coder. */
static void pdmacBwMgrLink(PPDMACBWMGR pBwMgr)
{
PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
@@ -596,7 +614,9 @@ static void pdmacBwMgrLink(PPDMACBWMGR pBwMgr)
rc = RTCritSectLeave(&pEpClass->CritSect); AssertRC(rc);
}
+
#ifdef SOME_UNUSED_FUNCTION
+/** Lazy coder. */
static void pdmacBwMgrUnlink(PPDMACBWMGR pBwMgr)
{
PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
@@ -619,18 +639,20 @@ static void pdmacBwMgrUnlink(PPDMACBWMGR pBwMgr)
}
#endif /* SOME_UNUSED_FUNCTION */
-static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pcszBwMgr, uint32_t cbTransferPerSecMax,
+
+/** Lazy coder. */
+static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pszBwMgr, uint32_t cbTransferPerSecMax,
uint32_t cbTransferPerSecStart, uint32_t cbTransferPerSecStep)
{
- LogFlowFunc(("pEpClass=%#p pcszBwMgr=%#p{%s} cbTransferPerSecMax=%u cbTransferPerSecStart=%u cbTransferPerSecStep=%u\n",
- pEpClass, pcszBwMgr, cbTransferPerSecMax, cbTransferPerSecStart, cbTransferPerSecStep));
+ LogFlowFunc(("pEpClass=%#p pszBwMgr=%#p{%s} cbTransferPerSecMax=%u cbTransferPerSecStart=%u cbTransferPerSecStep=%u\n",
+ pEpClass, pszBwMgr, cbTransferPerSecMax, cbTransferPerSecStart, cbTransferPerSecStep));
AssertPtrReturn(pEpClass, VERR_INVALID_POINTER);
- AssertPtrReturn(pcszBwMgr, VERR_INVALID_POINTER);
- AssertReturn(*pcszBwMgr != '\0', VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszBwMgr, VERR_INVALID_POINTER);
+ AssertReturn(*pszBwMgr != '\0', VERR_INVALID_PARAMETER);
int rc;
- PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pcszBwMgr);
+ PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pszBwMgr);
if (!pBwMgr)
{
rc = MMR3HeapAllocZEx(pEpClass->pVM, MM_TAG_PDM_ASYNC_COMPLETION,
@@ -638,7 +660,7 @@ static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass,
(void **)&pBwMgr);
if (RT_SUCCESS(rc))
{
- pBwMgr->pszId = RTStrDup(pcszBwMgr);
+ pBwMgr->pszId = RTStrDup(pszBwMgr);
if (pBwMgr->pszId)
{
pBwMgr->pEpClass = pEpClass;
@@ -669,17 +691,33 @@ static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass,
return rc;
}
-DECLINLINE(void) pdmacBwMgrRef(PPDMACBWMGR pBwMgr)
+
+/** Lazy coder. */
+DECLINLINE(void) pdmacBwMgrRetain(PPDMACBWMGR pBwMgr)
{
ASMAtomicIncU32(&pBwMgr->cRefs);
}
-DECLINLINE(void) pdmacBwMgrUnref(PPDMACBWMGR pBwMgr)
+
+/** Lazy coder. */
+DECLINLINE(void) pdmacBwMgrRelease(PPDMACBWMGR pBwMgr)
{
Assert(pBwMgr->cRefs > 0);
ASMAtomicDecU32(&pBwMgr->cRefs);
}
+
+/**
+ * Checks if the endpoint is allowed to transfer the given amount of bytes.
+ *
+ * @returns true if the endpoint is allowed to transfer the data.
+ * false otherwise
+ * @param pEndpoint The endpoint.
+ * @param cbTransfer The number of bytes to transfer.
+ * @param pmsWhenNext Where to store the number of milliseconds
+ * until the bandwidth is refreshed.
+ * Only set if false is returned.
+ */
bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext)
{
bool fAllowed = true;
@@ -728,6 +766,16 @@ bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cb
return fAllowed;
}
+
+/**
+ * Called by the endpoint if a task has finished.
+ *
+ * @returns nothing
+ * @param pTask Pointer to the finished task.
+ * @param rc Status code of the completed request.
+ * @param fCallCompletionHandler Flag whether the completion handler should be called to
+ * inform the owner of the task that it has completed.
+ */
void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler)
{
LogFlow(("%s: pTask=%#p fCallCompletionHandler=%RTbool\n", __FUNCTION__, pTask, fCallCompletionHandler));
@@ -762,6 +810,7 @@ void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, boo
pdmR3AsyncCompletionPutTask(pTask->pEndpoint, pTask);
}
+
/**
* Worker initializing a endpoint class.
*
@@ -777,7 +826,7 @@ int pdmR3AsyncCompletionEpClassInit(PVM pVM, PCPDMASYNCCOMPLETIONEPCLASSOPS pEpC
AssertReturn(pEpClassOps->u32Version == PDMAC_EPCLASS_OPS_VERSION, VERR_VERSION_MISMATCH);
AssertReturn(pEpClassOps->u32VersionEnd == PDMAC_EPCLASS_OPS_VERSION, VERR_VERSION_MISMATCH);
- LogFlowFunc((": pVM=%p pEpClassOps=%p{%s}\n", pVM, pEpClassOps, pEpClassOps->pcszName));
+ LogFlow(("pdmR3AsyncCompletionEpClassInit: pVM=%p pEpClassOps=%p{%s}\n", pVM, pEpClassOps, pEpClassOps->pszName));
/* Allocate global class data. */
PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = NULL;
@@ -794,7 +843,7 @@ int pdmR3AsyncCompletionEpClassInit(PVM pVM, PCPDMASYNCCOMPLETIONEPCLASSOPS pEpC
rc = RTCritSectInit(&pEndpointClass->CritSect);
if (RT_SUCCESS(rc))
{
- PCFGMNODE pCfgNodeClass = CFGMR3GetChild(pCfgHandle, pEpClassOps->pcszName);
+ PCFGMNODE pCfgNodeClass = CFGMR3GetChild(pCfgHandle, pEpClassOps->pszName);
/* Create task cache */
rc = RTMemCacheCreate(&pEndpointClass->hMemCacheTasks, pEpClassOps->cbTask,
@@ -847,8 +896,14 @@ int pdmR3AsyncCompletionEpClassInit(PVM pVM, PCPDMASYNCCOMPLETIONEPCLASSOPS pEpC
AssertMsg(!pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType],
("Endpoint class was already initialized\n"));
+#ifdef VBOX_WITH_STATISTICS
+ CFGMR3QueryBoolDef(pCfgNodeClass, "AdvancedStatistics", &pEndpointClass->fGatherAdvancedStatistics, true);
+#else
+ CFGMR3QueryBoolDef(pCfgNodeClass, "AdvancedStatistics", &pEndpointClass->fGatherAdvancedStatistics, false);
+#endif
+
pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType] = pEndpointClass;
- LogFlowFunc((": Initialized endpoint class \"%s\" rc=%Rrc\n", pEpClassOps->pcszName, rc));
+ LogFlowFunc((": Initialized endpoint class \"%s\" rc=%Rrc\n", pEpClassOps->pszName, rc));
return VINF_SUCCESS;
}
}
@@ -864,6 +919,7 @@ int pdmR3AsyncCompletionEpClassInit(PVM pVM, PCPDMASYNCCOMPLETIONEPCLASSOPS pEpC
return rc;
}
+
/**
* Worker terminating all endpoint classes.
*
@@ -900,6 +956,276 @@ static void pdmR3AsyncCompletionEpClassTerminate(PPDMASYNCCOMPLETIONEPCLASS pEnd
MMR3HeapFree(pEndpointClass);
}
+
+/**
+ * Records the size of the request in the statistics.
+ *
+ * @returns nothing.
+ * @param pEndpoint The endpoint to register the request size for.
+ * @param cbReq Size of the request.
+ */
+static void pdmR3AsyncCompletionStatisticsRecordSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, size_t cbReq)
+{
+ if (cbReq < 512)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSizeSmaller512);
+ else if (cbReq < _1K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize512To1K);
+ else if (cbReq < _2K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize1KTo2K);
+ else if (cbReq < _4K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize2KTo4K);
+ else if (cbReq < _8K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize4KTo8K);
+ else if (cbReq < _16K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize8KTo16K);
+ else if (cbReq < _32K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize16KTo32K);
+ else if (cbReq < _64K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize32KTo64K);
+ else if (cbReq < _128K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize64KTo128K);
+ else if (cbReq < _256K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize128KTo256K);
+ else if (cbReq < _512K)
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSize256KTo512K);
+ else
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqSizeOver512K);
+
+ if (cbReq & ((size_t)512 - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned512);
+ else if (cbReq & ((size_t)_4K - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned4K);
+ else if (cbReq & ((size_t)_8K - 1))
+ STAM_REL_COUNTER_INC(&pEndpoint->StatReqsUnaligned8K);
+}
+
+
+/**
+ * Records the required processing time of a request.
+ *
+ * @returns nothing.
+ * @param pEndpoint The endpoint.
+ * @param cNsRun The request time in nanoseconds.
+ */
+static void pdmR3AsyncCompletionStatisticsRecordCompletionTime(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t cNsRun)
+{
+ PSTAMCOUNTER pStatCounter;
+ if (cNsRun < RT_NS_1US)
+ pStatCounter = &pEndpoint->StatTaskRunTimesNs[cNsRun / (RT_NS_1US / 10)];
+ else if (cNsRun < RT_NS_1MS)
+ pStatCounter = &pEndpoint->StatTaskRunTimesUs[cNsRun / (RT_NS_1MS / 10)];
+ else if (cNsRun < RT_NS_1SEC)
+ pStatCounter = &pEndpoint->StatTaskRunTimesMs[cNsRun / (RT_NS_1SEC / 10)];
+ else if (cNsRun < RT_NS_1SEC_64*100)
+ pStatCounter = &pEndpoint->StatTaskRunTimesSec[cNsRun / (RT_NS_1SEC_64*100 / 10)];
+ else
+ pStatCounter = &pEndpoint->StatTaskRunOver100Sec;
+ STAM_REL_COUNTER_INC(pStatCounter);
+
+ STAM_REL_COUNTER_INC(&pEndpoint->StatIoOpsCompleted);
+ pEndpoint->cIoOpsCompleted++;
+ uint64_t tsMsCur = RTTimeMilliTS();
+ uint64_t tsInterval = tsMsCur - pEndpoint->tsIntervalStartMs;
+ if (tsInterval >= 1000)
+ {
+ pEndpoint->StatIoOpsPerSec.c = pEndpoint->cIoOpsCompleted / (tsInterval / 1000);
+ pEndpoint->tsIntervalStartMs = tsMsCur;
+ pEndpoint->cIoOpsCompleted = 0;
+ }
+}
+
+
+/**
+ * Registers advanced statistics for the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The endpoint to register the advanced statistics for.
+ */
+static int pdmR3AsyncCompletionStatisticsRegister(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ int rc = VINF_SUCCESS;
+ PVM pVM = pEndpoint->pEpClass->pVM;
+
+ pEndpoint->tsIntervalStartMs = RTTimeMilliTS();
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesNs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesNs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Nanosecond resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/TaskRun1Ns-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesUs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesUs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Microsecond resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/TaskRun2MicroSec-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesMs[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Milliseconds resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/TaskRun3Ms-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), i*100, i*100+100-1);
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs) && RT_SUCCESS(rc); i++)
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesSec[i], STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Second resolution runtime statistics",
+ "/PDM/AsyncCompletion/File/%s/TaskRun4Sec-%u-%u",
+ RTPathFilename(pEndpoint->pszUri), i*10, i*10+10-1);
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunOver100Sec, STAMTYPE_COUNTER,
+ STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Tasks which ran more than 100sec",
+ "/PDM/AsyncCompletion/File/%s/TaskRunSecGreater100Sec",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsPerSec, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Processed I/O operations per second",
+ "/PDM/AsyncCompletion/File/%s/IoOpsPerSec",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsStarted, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Started I/O operations for this endpoint",
+ "/PDM/AsyncCompletion/File/%s/IoOpsStarted",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsCompleted, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Completed I/O operations for this endpoint",
+ "/PDM/AsyncCompletion/File/%s/IoOpsCompleted",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSizeSmaller512, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size smaller than 512 bytes",
+ "/PDM/AsyncCompletion/File/%s/ReqSizeSmaller512",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize512To1K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 512 bytes and 1KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize512To1K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize1KTo2K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 1KB and 2KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize1KTo2K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize2KTo4K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 2KB and 4KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize2KTo4K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize4KTo8K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 4KB and 8KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize4KTo8K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize8KTo16K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 8KB and 16KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize8KTo16K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize16KTo32K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 16KB and 32KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize16KTo32K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize32KTo64K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 32KB and 64KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize32KTo64K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize64KTo128K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 64KB and 128KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize64KTo128K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize128KTo256K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 128KB and 256KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize128KTo256K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSize256KTo512K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size between 256KB and 512KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSize256KTo512K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqSizeOver512K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests with a size over 512KB",
+ "/PDM/AsyncCompletion/File/%s/ReqSizeOver512K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned512, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 512 bytes",
+ "/PDM/AsyncCompletion/File/%s/ReqsUnaligned512",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned4K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 4KB",
+ "/PDM/AsyncCompletion/File/%s/ReqsUnaligned4K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ if (RT_SUCCESS(rc))
+ rc = STAMR3RegisterF(pVM, &pEndpoint->StatReqsUnaligned8K, STAMTYPE_COUNTER,
+ STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
+ "Number of requests which size is not aligned to 8KB",
+ "/PDM/AsyncCompletion/File/%s/ReqsUnaligned8K",
+ RTPathFilename(pEndpoint->pszUri));
+
+ return rc;
+}
+
+
+/**
+ * Deregisters advanced statistics for one endpoint.
+ *
+ * @returns nothing.
+ * @param pEndpoint The endpoint to deregister the advanced statistics for.
+ */
+static void pdmR3AsyncCompletionStatisticsDeregister(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
+{
+ /* I hope this doesn't remove too much... */
+ STAMR3DeregisterF(pEndpoint->pEpClass->pVM->pUVM, "/PDM/AsyncCompletion/File/%s/*", RTPathFilename(pEndpoint->pszUri));
+}
+
+
/**
* Initialize the async completion manager.
*
@@ -920,6 +1246,7 @@ int pdmR3AsyncCompletionInit(PVM pVM)
return rc;
}
+
/**
* Terminates the async completion manager.
*
@@ -938,6 +1265,7 @@ int pdmR3AsyncCompletionTerm(PVM pVM)
return VINF_SUCCESS;
}
+
/**
* Resume worker for the async completion manager.
*
@@ -996,6 +1324,7 @@ void pdmR3AsyncCompletionResume(PVM pVM)
}
}
+
/**
* Tries to get a free task from the endpoint or class cache
* allocating the task if it fails.
@@ -1017,14 +1346,13 @@ static PPDMASYNCCOMPLETIONTASK pdmR3AsyncCompletionGetTask(PPDMASYNCCOMPLETIONEN
pTask->pPrev = NULL;
pTask->pNext = NULL;
pTask->tsNsStart = RTTimeNanoTS();
-#ifdef VBOX_WITH_STATISTICS
- STAM_COUNTER_INC(&pEndpoint->StatIoOpsStarted);
-#endif
+ STAM_REL_COUNTER_INC(&pEndpoint->StatIoOpsStarted);
}
return pTask;
}
+
/**
* Puts a task in one of the caches.
*
@@ -1040,37 +1368,15 @@ static void pdmR3AsyncCompletionPutTask(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, P
if (RT_UNLIKELY(cNsRun >= RT_NS_10SEC))
LogRel(("AsyncCompletion: Task %#p completed after %llu seconds\n", pTask, cNsRun / RT_NS_1SEC));
-#ifdef VBOX_WITH_STATISTICS
- PSTAMCOUNTER pStatCounter;
- if (cNsRun < RT_NS_1US)
- pStatCounter = &pEndpoint->StatTaskRunTimesNs[cNsRun / (RT_NS_1US / 10)];
- else if (cNsRun < RT_NS_1MS)
- pStatCounter = &pEndpoint->StatTaskRunTimesUs[cNsRun / (RT_NS_1MS / 10)];
- else if (cNsRun < RT_NS_1SEC)
- pStatCounter = &pEndpoint->StatTaskRunTimesMs[cNsRun / (RT_NS_1SEC / 10)];
- else if (cNsRun < RT_NS_1SEC_64*100)
- pStatCounter = &pEndpoint->StatTaskRunTimesSec[cNsRun / (RT_NS_1SEC_64*100 / 10)];
- else
- pStatCounter = &pEndpoint->StatTaskRunOver100Sec;
- STAM_COUNTER_INC(pStatCounter);
-
- STAM_COUNTER_INC(&pEndpoint->StatIoOpsCompleted);
- pEndpoint->cIoOpsCompleted++;
- uint64_t tsMsCur = RTTimeMilliTS();
- uint64_t tsInterval = tsMsCur - pEndpoint->tsIntervalStartMs;
- if (tsInterval >= 1000)
- {
- pEndpoint->StatIoOpsPerSec.c = pEndpoint->cIoOpsCompleted / (tsInterval / 1000);
- pEndpoint->tsIntervalStartMs = tsMsCur;
- pEndpoint->cIoOpsCompleted = 0;
- }
-#endif /* VBOX_WITH_STATISTICS */
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordCompletionTime(pEndpoint, cNsRun);
RTMemCacheFree(pEndpointClass->hMemCacheTasks, pTask);
}
-static PPDMASYNCCOMPLETIONENDPOINT pdmR3AsyncCompletionFindEndpointWithUri(PPDMASYNCCOMPLETIONEPCLASS pEndpointClass,
- const char *pszUri)
+
+static PPDMASYNCCOMPLETIONENDPOINT
+pdmR3AsyncCompletionFindEndpointWithUri(PPDMASYNCCOMPLETIONEPCLASS pEndpointClass, const char *pszUri)
{
PPDMASYNCCOMPLETIONENDPOINT pEndpoint = pEndpointClass->pEndpointsHead;
@@ -1085,6 +1391,17 @@ static PPDMASYNCCOMPLETIONENDPOINT pdmR3AsyncCompletionFindEndpointWithUri(PPDMA
return NULL;
}
+
+/**
+ * Opens a file as an async completion endpoint.
+ *
+ * @returns VBox status code.
+ * @param ppEndpoint Where to store the opaque endpoint handle on success.
+ * @param pszFilename Path to the file which is to be opened. (UTF-8)
+ * @param fFlags Open flags, see grp_pdmacep_file_flags.
+ * @param pTemplate Handle to the completion callback template to use
+ * for this end point.
+ */
VMMR3DECL(int) PDMR3AsyncCompletionEpCreateForFile(PPPDMASYNCCOMPLETIONENDPOINT ppEndpoint,
const char *pszFilename, uint32_t fFlags,
PPDMASYNCCOMPLETIONTEMPLATE pTemplate)
@@ -1125,7 +1442,6 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpCreateForFile(PPPDMASYNCCOMPLETIONENDPOINT
(void **)&pEndpoint);
if (RT_SUCCESS(rc))
{
-
/* Initialize common parts. */
pEndpoint->pNext = NULL;
pEndpoint->pPrev = NULL;
@@ -1142,135 +1458,35 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpCreateForFile(PPPDMASYNCCOMPLETIONENDPOINT
rc = pEndpointClass->pEndpointOps->pfnEpInitialize(pEndpoint, pszFilename, fFlags);
if (RT_SUCCESS(rc))
{
- /* Link it into the list of endpoints. */
- rc = RTCritSectEnter(&pEndpointClass->CritSect);
- AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
-
- pEndpoint->pNext = pEndpointClass->pEndpointsHead;
- if (pEndpointClass->pEndpointsHead)
- pEndpointClass->pEndpointsHead->pPrev = pEndpoint;
-
- pEndpointClass->pEndpointsHead = pEndpoint;
- pEndpointClass->cEndpoints++;
-
- rc = RTCritSectLeave(&pEndpointClass->CritSect);
- AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
-
- /* Reference the template. */
- ASMAtomicIncU32(&pTemplate->cUsed);
-
-#ifdef VBOX_WITH_STATISTICS
- /* Init the statistics part */
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesNs); i++)
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesNs[i], STAMTYPE_COUNTER,
- STAMVISIBILITY_USED,
- STAMUNIT_OCCURENCES,
- "Nanosecond resolution runtime statistics",
- "/PDM/AsyncCompletion/File/%s/TaskRun1Ns-%u-%u",
- RTPathFilename(pEndpoint->pszUri),
- i*100, i*100+100-1);
- if (RT_FAILURE(rc))
- break;
- }
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ rc = pdmR3AsyncCompletionStatisticsRegister(pEndpoint);
if (RT_SUCCESS(rc))
{
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesUs); i++)
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesUs[i], STAMTYPE_COUNTER,
- STAMVISIBILITY_USED,
- STAMUNIT_OCCURENCES,
- "Microsecond resolution runtime statistics",
- "/PDM/AsyncCompletion/File/%s/TaskRun2MicroSec-%u-%u",
- RTPathFilename(pEndpoint->pszUri),
- i*100, i*100+100-1);
- if (RT_FAILURE(rc))
- break;
- }
- }
+ /* Link it into the list of endpoints. */
+ rc = RTCritSectEnter(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
- if (RT_SUCCESS(rc))
- {
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs); i++)
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesMs[i], STAMTYPE_COUNTER,
- STAMVISIBILITY_USED,
- STAMUNIT_OCCURENCES,
- "Milliseconds resolution runtime statistics",
- "/PDM/AsyncCompletion/File/%s/TaskRun3Ms-%u-%u",
- RTPathFilename(pEndpoint->pszUri),
- i*100, i*100+100-1);
- if (RT_FAILURE(rc))
- break;
- }
- }
-
- if (RT_SUCCESS(rc))
- {
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs); i++)
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunTimesSec[i], STAMTYPE_COUNTER,
- STAMVISIBILITY_USED,
- STAMUNIT_OCCURENCES,
- "Second resolution runtime statistics",
- "/PDM/AsyncCompletion/File/%s/TaskRun4Sec-%u-%u",
- RTPathFilename(pEndpoint->pszUri),
- i*10, i*10+10-1);
- if (RT_FAILURE(rc))
- break;
- }
- }
+ pEndpoint->pNext = pEndpointClass->pEndpointsHead;
+ if (pEndpointClass->pEndpointsHead)
+ pEndpointClass->pEndpointsHead->pPrev = pEndpoint;
- if (RT_SUCCESS(rc))
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatTaskRunOver100Sec, STAMTYPE_COUNTER,
- STAMVISIBILITY_USED,
- STAMUNIT_OCCURENCES,
- "Tasks which ran more than 100sec",
- "/PDM/AsyncCompletion/File/%s/TaskRunSecGreater100Sec",
- RTPathFilename(pEndpoint->pszUri));
- }
+ pEndpointClass->pEndpointsHead = pEndpoint;
+ pEndpointClass->cEndpoints++;
- if (RT_SUCCESS(rc))
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsPerSec, STAMTYPE_COUNTER,
- STAMVISIBILITY_ALWAYS,
- STAMUNIT_OCCURENCES,
- "Processed I/O operations per second",
- "/PDM/AsyncCompletion/File/%s/IoOpsPerSec",
- RTPathFilename(pEndpoint->pszUri));
- }
+ rc = RTCritSectLeave(&pEndpointClass->CritSect);
+ AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
- if (RT_SUCCESS(rc))
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsStarted, STAMTYPE_COUNTER,
- STAMVISIBILITY_ALWAYS,
- STAMUNIT_OCCURENCES,
- "Started I/O operations for this endpoint",
- "/PDM/AsyncCompletion/File/%s/IoOpsStarted",
- RTPathFilename(pEndpoint->pszUri));
- }
+ /* Reference the template. */
+ ASMAtomicIncU32(&pTemplate->cUsed);
- if (RT_SUCCESS(rc))
- {
- rc = STAMR3RegisterF(pVM, &pEndpoint->StatIoOpsCompleted, STAMTYPE_COUNTER,
- STAMVISIBILITY_ALWAYS,
- STAMUNIT_OCCURENCES,
- "Completed I/O operations for this endpoint",
- "/PDM/AsyncCompletion/File/%s/IoOpsCompleted",
- RTPathFilename(pEndpoint->pszUri));
+ *ppEndpoint = pEndpoint;
+ LogFlowFunc((": Created endpoint for %s\n", pszFilename));
+ return VINF_SUCCESS;
}
- /** @todo why bother maintaing rc when it's just ignored /
- logged and not returned? */
-
- pEndpoint->tsIntervalStartMs = RTTimeMilliTS();
-#endif
- *ppEndpoint = pEndpoint;
-
- LogFlowFunc((": Created endpoint for %s: rc=%Rrc\n", pszFilename, rc));
- return VINF_SUCCESS;
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsDeregister(pEndpoint);
}
RTStrFree(pEndpoint->pszUri);
}
@@ -1281,6 +1497,13 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpCreateForFile(PPPDMASYNCCOMPLETIONENDPOINT
return rc;
}
+
+/**
+ * Closes an endpoint, waiting for any pending tasks to finish.
+ *
+ * @returns nothing.
+ * @param pEndpoint Handle of the endpoint.
+ */
VMMR3DECL(void) PDMR3AsyncCompletionEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
{
LogFlowFunc((": pEndpoint=%p\n", pEndpoint));
@@ -1294,6 +1517,8 @@ VMMR3DECL(void) PDMR3AsyncCompletionEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoin
if (!pEndpoint->cUsers)
{
PPDMASYNCCOMPLETIONEPCLASS pEndpointClass = pEndpoint->pEpClass;
+ PVM pVM = pEndpointClass->pVM;
+
pEndpointClass->pEndpointOps->pfnEpClose(pEndpoint);
/* Drop reference from the template. */
@@ -1318,30 +1543,28 @@ VMMR3DECL(void) PDMR3AsyncCompletionEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoin
rc = RTCritSectLeave(&pEndpointClass->CritSect);
AssertMsg(RT_SUCCESS(rc), ("Failed to enter critical section rc=%Rrc\n", rc));
-#ifdef VBOX_WITH_STATISTICS
- /* Deregister the statistics part */
- PVM pVM = pEndpointClass->pVM;
-
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesNs); i++)
- STAMR3Deregister(pVM, &pEndpoint->StatTaskRunTimesNs[i]);
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesUs); i++)
- STAMR3Deregister(pVM, &pEndpoint->StatTaskRunTimesUs[i]);
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs); i++)
- STAMR3Deregister(pVM, &pEndpoint->StatTaskRunTimesMs[i]);
- for (unsigned i = 0; i < RT_ELEMENTS(pEndpoint->StatTaskRunTimesMs); i++)
- STAMR3Deregister(pVM, &pEndpoint->StatTaskRunTimesSec[i]);
-
- STAMR3Deregister(pVM, &pEndpoint->StatTaskRunOver100Sec);
- STAMR3Deregister(pVM, &pEndpoint->StatIoOpsPerSec);
- STAMR3Deregister(pVM, &pEndpoint->StatIoOpsStarted);
- STAMR3Deregister(pVM, &pEndpoint->StatIoOpsCompleted);
-#endif
+ if (pEndpointClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsDeregister(pEndpoint);
RTStrFree(pEndpoint->pszUri);
MMR3HeapFree(pEndpoint);
}
}
+
+/**
+ * Creates a read task on the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to read from.
+ * @param off Where to start reading from.
+ * @param paSegments Scatter gather list to store the data in.
+ * @param cSegments Number of segments in the list.
+ * @param cbRead The overall number of bytes to read.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
VMMR3DECL(int) PDMR3AsyncCompletionEpRead(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
PCRTSGSEG paSegments, unsigned cSegments,
size_t cbRead, void *pvUser,
@@ -1363,13 +1586,32 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpRead(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
int rc = pEndpoint->pEpClass->pEndpointOps->pfnEpRead(pTask, pEndpoint, off,
paSegments, cSegments, cbRead);
if (RT_SUCCESS(rc))
+ {
+ if (pEndpoint->pEpClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordSize(pEndpoint, cbRead);
+
*ppTask = pTask;
+ }
else
pdmR3AsyncCompletionPutTask(pEndpoint, pTask);
return rc;
}
+
+/**
+ * Creates a write task on the given endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to write to.
+ * @param off Where to start writing at.
+ * @param paSegments Scatter gather list of the data to write.
+ * @param cSegments Number of segments in the list.
+ * @param cbWrite The overall number of bytes to write.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
VMMR3DECL(int) PDMR3AsyncCompletionEpWrite(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, RTFOFF off,
PCRTSGSEG paSegments, unsigned cSegments,
size_t cbWrite, void *pvUser,
@@ -1392,6 +1634,9 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpWrite(PPDMASYNCCOMPLETIONENDPOINT pEndpoint
paSegments, cSegments, cbWrite);
if (RT_SUCCESS(rc))
{
+ if (pEndpoint->pEpClass->fGatherAdvancedStatistics)
+ pdmR3AsyncCompletionStatisticsRecordSize(pEndpoint, cbWrite);
+
*ppTask = pTask;
}
else
@@ -1400,9 +1645,20 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpWrite(PPDMASYNCCOMPLETIONENDPOINT pEndpoint
return rc;
}
-VMMR3DECL(int) PDMR3AsyncCompletionEpFlush(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
- void *pvUser,
- PPPDMASYNCCOMPLETIONTASK ppTask)
+
+/**
+ * Creates a flush task on the given endpoint.
+ *
+ * Every read and write task initiated before the flush task is
+ * finished upon completion of this task.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The file endpoint to flush.
+ * @param pvUser Opaque user data returned in the completion callback
+ * upon completion of the task.
+ * @param ppTask Where to store the task handle on success.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpFlush(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, void *pvUser, PPPDMASYNCCOMPLETIONTASK ppTask)
{
AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
AssertPtrReturn(ppTask, VERR_INVALID_POINTER);
@@ -1422,6 +1678,18 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpFlush(PPDMASYNCCOMPLETIONENDPOINT pEndpoint
return rc;
}
+
+/**
+ * Queries the size of an endpoint.
+ *
+ * Note that some endpoints may not support this and will return an error
+ * (sockets for example).
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_SUPPORTED if the endpoint does not support this operation.
+ * @param pEndpoint The file endpoint.
+ * @param pcbSize Where to store the size of the endpoint.
+ */
VMMR3DECL(int) PDMR3AsyncCompletionEpGetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
uint64_t *pcbSize)
{
@@ -1433,8 +1701,21 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpGetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoi
return VERR_NOT_SUPPORTED;
}
-VMMR3DECL(int) PDMR3AsyncCompletionEpSetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
- uint64_t cbSize)
+
+/**
+ * Sets the size of an endpoint.
+ *
+ * Note that some endpoints may not support this and will return an error
+ * (sockets for example).
+ *
+ * @returns VBox status code.
+ * @retval VERR_NOT_SUPPORTED if the endpoint does not support this operation.
+ * @param pEndpoint The file endpoint.
+ * @param cbSize The size to set.
+ *
+ * @note PDMR3AsyncCompletionEpFlush should be called before this operation is executed.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpSetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint64_t cbSize)
{
AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
@@ -1443,19 +1724,27 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpSetSize(PPDMASYNCCOMPLETIONENDPOINT pEndpoi
return VERR_NOT_SUPPORTED;
}
-VMMR3DECL(int) PDMR3AsyncCompletionEpSetBwMgr(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
- const char *pcszBwMgr)
+
+/**
+ * Assigns or removes a bandwidth control manager to/from the endpoint.
+ *
+ * @returns VBox status code.
+ * @param pEndpoint The endpoint.
+ * @param pszBwMgr The identifier of the new bandwidth manager to assign
+ * or NULL to remove the current one.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionEpSetBwMgr(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, const char *pszBwMgr)
{
AssertPtrReturn(pEndpoint, VERR_INVALID_POINTER);
PPDMACBWMGR pBwMgrOld = NULL;
PPDMACBWMGR pBwMgrNew = NULL;
int rc = VINF_SUCCESS;
- if (pcszBwMgr)
+ if (pszBwMgr)
{
- pBwMgrNew = pdmacBwMgrFindById(pEndpoint->pEpClass, pcszBwMgr);
+ pBwMgrNew = pdmacBwMgrFindById(pEndpoint->pEpClass, pszBwMgr);
if (pBwMgrNew)
- pdmacBwMgrRef(pBwMgrNew);
+ pdmacBwMgrRetain(pBwMgrNew);
else
rc = VERR_NOT_FOUND;
}
@@ -1464,26 +1753,49 @@ VMMR3DECL(int) PDMR3AsyncCompletionEpSetBwMgr(PPDMASYNCCOMPLETIONENDPOINT pEndpo
{
pBwMgrOld = ASMAtomicXchgPtrT(&pEndpoint->pBwMgr, pBwMgrNew, PPDMACBWMGR);
if (pBwMgrOld)
- pdmacBwMgrUnref(pBwMgrOld);
+ pdmacBwMgrRelease(pBwMgrOld);
}
return rc;
}
+
+/**
+ * Cancels an async completion task.
+ *
+ * If you want to use this method, you have to take great care to make sure
+ * you will never attempt to cancel a task which has been completed. Since there is
+ * no reference counting or anything on the task itself, you have to serialize
+ * the cancellation and completion paths such that they aren't racing one another.
+ *
+ * @returns VBox status code
+ * @param pTask The Task to cancel.
+ */
VMMR3DECL(int) PDMR3AsyncCompletionTaskCancel(PPDMASYNCCOMPLETIONTASK pTask)
{
NOREF(pTask);
return VERR_NOT_IMPLEMENTED;
}
-VMMR3DECL(int) PDMR3AsyncCompletionBwMgrSetMaxForFile(PVM pVM, const char *pcszBwMgr, uint32_t cbMaxNew)
+
+/**
+ * Changes the limit of a bandwidth manager for file endpoints to the given value.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszBwMgr The identifier of the bandwidth manager to change.
+ * @param cbMaxNew The new maximum for the bandwidth manager in bytes/sec.
+ */
+VMMR3DECL(int) PDMR3AsyncCompletionBwMgrSetMaxForFile(PUVM pUVM, const char *pszBwMgr, uint32_t cbMaxNew)
{
- AssertPtrReturn(pVM, VERR_INVALID_POINTER);
- AssertPtrReturn(pcszBwMgr, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertPtrReturn(pszBwMgr, VERR_INVALID_POINTER);
int rc = VINF_SUCCESS;
PPDMASYNCCOMPLETIONEPCLASS pEpClass = pVM->pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
- PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pcszBwMgr);
+ PPDMACBWMGR pBwMgr = pdmacBwMgrFindById(pEpClass, pszBwMgr);
if (pBwMgr)
{
/*
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
index d9786d37..9f60876d 100644
--- a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -28,6 +28,7 @@
#include <VBox/log.h>
#include <VBox/dbg.h>
#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/tm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
@@ -39,6 +40,7 @@
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/path.h>
+#include <iprt/rand.h>
#include "PDMAsyncCompletionFileInternal.h"
@@ -46,9 +48,9 @@
* Internal Functions *
*******************************************************************************/
#ifdef VBOX_WITH_DEBUGGER
-static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR pArgs, unsigned cArgs);
+static FNDBGCCMD pdmacEpFileErrorInject;
# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
-static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR pArgs, unsigned cArgs);
+static FNDBGCCMD pdmacEpFileDelayInject;
# endif
#endif
@@ -71,17 +73,19 @@ static const DBGCVARDESC g_aInjectDelayArgs[] =
{ 1, 1, DBGCVAR_CAT_STRING, 0, "direction", "write|read|flush|any." },
{ 1, 1, DBGCVAR_CAT_STRING, 0, "filename", "Filename." },
{ 1, 1, DBGCVAR_CAT_NUMBER, 0, "delay", "Delay in milliseconds." },
- { 1, 1, DBGCVAR_CAT_NUMBER, 0, "reqs", "Number of requests to delay." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "jitter", "Jitter of the delay." },
+ { 1, 1, DBGCVAR_CAT_NUMBER, 0, "reqs", "Number of requests to delay." }
+
};
# endif
/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
- /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
- { "injecterror", 3, 3, &g_aInjectErrorArgs[0], 3, 0, pdmacEpFileErrorInject, "", "Inject error into I/O subsystem." }
+ /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax,.pszDescription */
+ { "injecterror", 3, 3, &g_aInjectErrorArgs[0], 3, 0, pdmacEpFileErrorInject, "", "Inject error into I/O subsystem." }
# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
- ,{ "injectdelay", 4, 4, &g_aInjectDelayArgs[0], 4, 0, pdmacEpFileDelayInject, "", "Inject a delay of a request." }
+ ,{ "injectdelay", 3, 5, &g_aInjectDelayArgs[0], RT_ELEMENTS(g_aInjectDelayArgs), 0, pdmacEpFileDelayInject, "", "Inject a delay of a request." }
# endif
};
#endif
@@ -329,15 +333,21 @@ void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
{
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEpFile->Core.pEpClass;
/* Check if we should delay completion of the request. */
if ( ASMAtomicReadU32(&pEpFile->msDelay) > 0
&& ASMAtomicReadU32(&pEpFile->cReqsDelay) > 0)
{
+ uint64_t tsDelay = pEpFile->msDelay;
+
+ if (pEpFile->msJitter)
+ tsDelay = (RTRandU32() % 100) > 50 ? pEpFile->msDelay + (RTRandU32() % pEpFile->msJitter)
+ : pEpFile->msDelay - (RTRandU32() % pEpFile->msJitter);
ASMAtomicDecU32(&pEpFile->cReqsDelay);
/* Arm the delay. */
- pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + pEpFile->msDelay;
+ pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + tsDelay;
/* Append to the list. */
PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
@@ -347,42 +357,17 @@ void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
pTaskFile->pDelayedNext = pHead;
} while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTaskFile, pHead));
- LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, pEpFile->msDelay));
- return;
- }
-#endif
- pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
-
-#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
- /* Check for an expired delay. */
- if (pEpFile->pDelayedHead != NULL)
- {
- uint64_t tsCur = RTTimeProgramMilliTS();
- pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE);
-
- while (pTaskFile)
+ if (tsDelay < pEpClassFile->cMilliesNext)
{
- PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile;
- pTaskFile = pTaskFile->pDelayedNext;
-
- if (tsCur >= pTmp->tsDelayEnd)
- {
- LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp));
- pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true);
- }
- else
- {
- /* Prepend to the delayed list again. */
- PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
- do
- {
- pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
- pTmp->pDelayedNext = pHead;
- } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead));
- }
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, tsDelay);
+ TMTimerSetMillies(pEpClassFile->pTimer, tsDelay);
}
+
+ LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, tsDelay));
}
+ else
#endif
+ pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
}
}
}
@@ -625,21 +610,21 @@ static int pdmacFileEpNativeGetSize(RTFILE hFile, uint64_t *pcbSize)
#ifdef VBOX_WITH_DEBUGGER
/**
- * Error inject callback.
+ * @callback_method_impl{FNDBGCCMD, The '.injecterror' command.}
*/
-static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR pArgs, unsigned cArgs)
+static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR pArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs == 3);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, pArgs[0].enmType == DBGCVAR_TYPE_STRING);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, pArgs[1].enmType == DBGCVAR_TYPE_STRING);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 2, pArgs[2].enmType == DBGCVAR_TYPE_NUMBER);
PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile;
- pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pVM->pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+ pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
/* Syntax is "read|write <filename> <status code>" */
bool fWrite;
@@ -654,7 +639,6 @@ static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmd
if ((uint64_t)rcToInject != pArgs[2].u.u64Number)
return DBGCCmdHlpFail(pCmdHlp, pCmd, "The status code '%lld' is out of range", pArgs[0].u.u64Number);
-
/*
* Search for the matching endpoint.
*/
@@ -691,21 +675,21 @@ static DECLCALLBACK(int) pdmacEpFileErrorInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmd
# ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
/**
- * Delay inject callback.
+ * @callback_method_impl{FNDBGCCMD, The '.injectdelay' command.}
*/
-static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR pArgs, unsigned cArgs)
+static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR pArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs >= 3);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, pArgs[0].enmType == DBGCVAR_TYPE_STRING);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, pArgs[1].enmType == DBGCVAR_TYPE_STRING);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 2, pArgs[2].enmType == DBGCVAR_TYPE_NUMBER);
PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile;
- pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pVM->pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
+ pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pUVM->pdm.s.apAsyncCompletionEndpointClass[PDMASYNCCOMPLETIONEPCLASSTYPE_FILE];
/* Syntax is "read|write|flush|any <filename> <delay> [reqs]" */
PDMACFILEREQTYPEDELAY enmDelayType = PDMACFILEREQTYPEDELAY_ANY;
@@ -725,8 +709,11 @@ static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmd
return DBGCCmdHlpFail(pCmdHlp, pCmd, "The delay '%lld' is out of range", pArgs[0].u.u64Number);
uint32_t cReqsDelay = 1;
- if (cArgs == 4)
- cReqsDelay = (uint32_t)pArgs[3].u.u64Number;
+ uint32_t msJitter = 0;
+ if (cArgs >= 4)
+ msJitter = (uint32_t)pArgs[3].u.u64Number;
+ if (cArgs == 5)
+ cReqsDelay = (uint32_t)pArgs[4].u.u64Number;
/*
* Search for the matching endpoint.
@@ -745,6 +732,7 @@ static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmd
{
ASMAtomicWriteSize(&pEpFile->enmTypeDelay, enmDelayType);
ASMAtomicWriteU32(&pEpFile->msDelay, msDelay);
+ ASMAtomicWriteU32(&pEpFile->msJitter, msJitter);
ASMAtomicWriteU32(&pEpFile->cReqsDelay, cReqsDelay);
DBGCCmdHlpPrintf(pCmdHlp, "Injected delay for the next %u requests of %u ms into '%s' for %s\n",
@@ -757,6 +745,62 @@ static DECLCALLBACK(int) pdmacEpFileDelayInject(PCDBGCCMD pCmd, PDBGCCMDHLP pCmd
return DBGCCmdHlpFail(pCmdHlp, pCmd, "No file with name '%s' found", pArgs[1].u.pszString);
return VINF_SUCCESS;
}
+
+static DECLCALLBACK(void) pdmacR3TimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
+{
+ uint64_t tsCur = RTTimeProgramMilliTS();
+ uint64_t cMilliesNext = UINT64_MAX;
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;
+
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, UINT64_MAX);
+
+ /* Go through all endpoints and check for expired requests. */
+ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;
+
+ while (pEpFile)
+ {
+ /* Check for an expired delay. */
+ if (pEpFile->pDelayedHead != NULL)
+ {
+ PPDMASYNCCOMPLETIONTASKFILE pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE);
+
+ while (pTaskFile)
+ {
+ PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile;
+ pTaskFile = pTaskFile->pDelayedNext;
+
+ if (tsCur >= pTmp->tsDelayEnd)
+ {
+ LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp));
+ pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true);
+ }
+ else
+ {
+ /* Prepend to the delayed list again. */
+ PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
+
+ if (pTmp->tsDelayEnd - tsCur < cMilliesNext)
+ cMilliesNext = pTmp->tsDelayEnd - tsCur;
+
+ do
+ {
+ pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
+ pTmp->pDelayedNext = pHead;
+ } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead));
+ }
+ }
+ }
+
+ pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
+ }
+
+ if (cMilliesNext < pEpClassFile->cMilliesNext)
+ {
+ ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, cMilliesNext);
+ TMTimerSetMillies(pEpClassFile->pTimer, cMilliesNext);
+ }
+}
+
# endif /* PDM_ASYNC_COMPLETION_FILE_WITH_DELAY */
#endif /* VBOX_WITH_DEBUGGER */
@@ -835,6 +879,12 @@ static int pdmacFileInitialize(PPDMASYNCCOMPLETIONEPCLASS pClassGlobals, PCFGMNO
rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
AssertRC(rc);
}
+
+#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
+ rc = TMR3TimerCreateInternal(pEpClassFile->Core.pVM, TMCLOCK_REAL, pdmacR3TimerCallback, pEpClassFile, "AC Delay", &pEpClassFile->pTimer);
+ AssertRC(rc);
+ pEpClassFile->cMilliesNext = UINT64_MAX;
+#endif
#endif
return rc;
@@ -1103,8 +1153,8 @@ static int pdmacFileEpClose(PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
RTFileClose(pEpFile->hFile);
#ifdef VBOX_WITH_STATISTICS
- STAMR3Deregister(pEpClassFile->Core.pVM, &pEpFile->StatRead);
- STAMR3Deregister(pEpClassFile->Core.pVM, &pEpFile->StatWrite);
+ /* Not sure if this might be unnecessary because of similar statement in pdmR3AsyncCompletionStatisticsDeregister? */
+ STAMR3DeregisterF(pEpClassFile->Core.pVM->pUVM, "/PDM/AsyncCompletion/File/%s/*", RTPathFilename(pEpFile->Core.pszUri));
#endif
return VINF_SUCCESS;
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp
index a32f77e5..4105e0c0 100644
--- a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileFailsafe.cpp
@@ -5,7 +5,7 @@
*/
/*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
index 2b37f009..44d31e7f 100644
--- a/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
+++ b/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -54,9 +54,9 @@ int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
{
pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
- int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
+ int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
if (rc == VERR_OUT_OF_RANGE)
- rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
+ rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);
if (RT_SUCCESS(rc))
{
@@ -354,9 +354,9 @@ static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;
RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
- int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
+ int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS, 0 /* fFlags */);
if (rc == VERR_OUT_OF_RANGE)
- rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);
+ rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax, 0 /* fFlags */);
if (RT_SUCCESS(rc))
{
@@ -637,12 +637,19 @@ static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
RTFOFF offStart, size_t cbRange,
- PPDMACTASKFILE pTask)
+ PPDMACTASKFILE pTask, bool fAlignedReq)
{
AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
|| pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
("Invalid task type %d\n", pTask->enmTransferType));
+ /*
+ * If there is no unaligned request active and the current one is aligned
+ * just pass it through.
+ */
+ if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
+ return false;
+
PPDMACFILERANGELOCK pRangeLock;
pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
if (!pRangeLock)
@@ -658,12 +665,7 @@ static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE p
}
/* Check whether we have one of the situations explained below */
- if ( pRangeLock
-#if 0 /** @todo later. For now we will just block all requests if they interfere */
- && ( (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
- || (!pRangeLock->fReadLock)
-#endif
- )
+ if (pRangeLock)
{
/* Add to the list. */
pTask->pNext = NULL;
@@ -689,15 +691,25 @@ static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE p
static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
RTFOFF offStart, size_t cbRange,
- PPDMACTASKFILE pTask)
+ PPDMACTASKFILE pTask, bool fAlignedReq)
{
LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p offStart=%RTfoff cbRange=%zu pTask=%#p\n",
pAioMgr, pEndpoint, offStart, cbRange, pTask));
- AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask),
+ AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask, fAlignedReq),
("Range is already locked offStart=%RTfoff cbRange=%u\n",
offStart, cbRange));
+ /*
+ * If there is no unaligned request active and the current one is aligned
+ * just don't use the lock.
+ */
+ if (!pEndpoint->AioMgr.cLockedReqsActive && fAlignedReq)
+ {
+ pTask->pRangeLock = NULL;
+ return VINF_SUCCESS;
+ }
+
PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
if (!pRangeLock)
return VERR_NO_MEMORY;
@@ -715,6 +727,7 @@ static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
/* Let the task point to its lock. */
pTask->pRangeLock = pRangeLock;
+ pEndpoint->AioMgr.cLockedReqsActive++;
return VINF_SUCCESS;
}
@@ -728,7 +741,10 @@ static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr
LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p pRangeLock=%#p\n",
pAioMgr, pEndpoint, pRangeLock));
- AssertPtr(pRangeLock);
+ /* pRangeLock can be NULL if there was no lock assigned with the task. */
+ if (!pRangeLock)
+ return NULL;
+
Assert(pRangeLock->cRefs == 1);
RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
@@ -736,6 +752,7 @@ static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr
pRangeLock->pWaitingTasksHead = NULL;
pRangeLock->pWaitingTasksTail = NULL;
RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
+ pEndpoint->AioMgr.cLockedReqsActive--;
return pTasksWaitingHead;
}
@@ -771,7 +788,8 @@ static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
* the same range. This will result in data corruption if both are executed concurrently.
*/
int rc = VINF_SUCCESS;
- bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
+ bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask,
+ true /* fAlignedReq */);
if (!fLocked)
{
/* Get a request handle. */
@@ -799,7 +817,7 @@ static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
pTask->DataSeg.cbSeg,
- pTask);
+ pTask, true /* fAlignedReq */);
if (RT_SUCCESS(rc))
{
@@ -825,6 +843,8 @@ static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;
+ bool fAlignedReq = cbToTransfer == pTask->DataSeg.cbSeg
+ && offStart == pTask->Off;
AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
|| (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
@@ -852,7 +872,7 @@ static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
* the same range. This will result in data corruption if both are executed concurrently.
*/
int rc = VINF_SUCCESS;
- bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);
+ bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
if (!fLocked)
{
PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
@@ -862,8 +882,7 @@ static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
- if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
- || RT_UNLIKELY(offStart != pTask->Off)
+ if ( !fAlignedReq
|| ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
{
LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
@@ -928,8 +947,7 @@ static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
offStart, pvBuf, cbToTransfer, pTask);
AssertRC(rc);
- rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask);
-
+ rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask, fAlignedReq);
if (RT_SUCCESS(rc))
{
pTask->hReq = hReq;
@@ -998,6 +1016,7 @@ static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->hFile, pCurr);
if (RT_FAILURE(rc))
{
+ LogRel(("AIOMgr: Preparing flush failed with %Rrc, disabling async flushes\n", rc));
pEndpoint->fAsyncFlushSupported = false;
pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
rc = VINF_SUCCESS; /* Fake success */
@@ -1349,7 +1368,7 @@ static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAI
if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
{
- LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
+ LogRel(("AIOMgr: Flush failed with %Rrc, disabling async flushes\n", rcReq));
pEndpoint->fAsyncFlushSupported = false;
AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
/* The other method will take over now. */
@@ -1497,8 +1516,8 @@ static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAI
/* Write it now. */
pTask->fPrefetch = false;
- size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
+ size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
/* Grow the file if needed. */
if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
diff --git a/src/VBox/VMM/VMMR3/PDMBlkCache.cpp b/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
index 9bbee652..b57e4dd1 100644
--- a/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
+++ b/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -41,26 +41,26 @@
{ \
AssertMsg(RTCritSectIsOwner(&Cache->CritSect), \
("Thread does not own critical section\n"));\
- } while(0)
+ } while (0)
# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) \
do \
{ \
AssertMsg(RTSemRWIsWriteOwner(pEpCache->SemRWEntries), \
("Thread is not exclusive owner of the per endpoint RW semaphore\n")); \
- } while(0)
+ } while (0)
# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) \
do \
{ \
AssertMsg(RTSemRWIsReadOwner(pEpCache->SemRWEntries), \
("Thread is not read owner of the per endpoint RW semaphore\n")); \
- } while(0)
+ } while (0)
#else
-# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while(0)
-# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) do { } while(0)
-# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) do { } while(0)
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while (0)
+# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) do { } while (0)
+# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) do { } while (0)
#endif
#define PDM_BLK_CACHE_SAVED_STATE_VERSION 1
@@ -1241,9 +1241,8 @@ static int pdmR3BlkCacheRetain(PVM pVM, PPPDMBLKCACHE ppBlkCache, const char *pc
LogFlowFunc(("returns success\n"));
return VINF_SUCCESS;
}
- else
- rc = VERR_NO_MEMORY;
+ rc = VERR_NO_MEMORY;
RTSemRWDestroy(pBlkCache->SemRWEntries);
}
@@ -1445,7 +1444,7 @@ VMMR3DECL(void) PDMR3BlkCacheRelease(PPDMBLKCACHE pBlkCache)
RTSemRWDestroy(pBlkCache->SemRWEntries);
#ifdef VBOX_WITH_STATISTICS
- STAMR3Deregister(pCache->pVM, &pBlkCache->StatWriteDeferred);
+ STAMR3DeregisterF(pCache->pVM->pUVM, "/PDM/BlkCache/%s/Cache/DeferredWrites", pBlkCache->pszId);
#endif
RTStrFree(pBlkCache->pszId);
diff --git a/src/VBox/VMM/VMMR3/PDMCritSect.cpp b/src/VBox/VMM/VMMR3/PDMCritSect.cpp
index 06321b77..039c379b 100644
--- a/src/VBox/VMM/VMMR3/PDMCritSect.cpp
+++ b/src/VBox/VMM/VMMR3/PDMCritSect.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2009 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,6 +22,7 @@
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
@@ -40,6 +41,7 @@
* Internal Functions *
*******************************************************************************/
static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal);
+static int pdmR3CritSectRwDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTRWINT pCritSect, PPDMCRITSECTRWINT pPrev, bool fFinal);
@@ -49,7 +51,7 @@ static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect,
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
-int pdmR3CritSectInitStats(PVM pVM)
+int pdmR3CritSectBothInitStats(PVM pVM)
{
STAM_REG(pVM, &pVM->pdm.s.StatQueuedCritSectLeaves, STAMTYPE_COUNTER, "/PDM/QueuedCritSectLeaves", STAMUNIT_OCCURENCES,
"Number of times a critical section leave request needed to be queued for ring-3 execution.");
@@ -62,7 +64,7 @@ int pdmR3CritSectInitStats(PVM pVM)
*
* @param pVM Pointer to the VM.
*/
-void pdmR3CritSectRelocate(PVM pVM)
+void pdmR3CritSectBothRelocate(PVM pVM)
{
PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
@@ -72,6 +74,11 @@ void pdmR3CritSectRelocate(PVM pVM)
pCur = pCur->pNext)
pCur->pVMRC = pVM->pVMRC;
+ for (PPDMCRITSECTRWINT pCur = pUVM->pdm.s.pRwCritSects;
+ pCur;
+ pCur = pCur->pNext)
+ pCur->pVMRC = pVM->pVMRC;
+
RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
}
@@ -90,7 +97,7 @@ void pdmR3CritSectRelocate(PVM pVM)
* @param pVMU The user mode VM handle.
* @remark Don't confuse this with PDMR3CritSectDelete.
*/
-VMMDECL(int) PDMR3CritSectTerm(PVM pVM)
+VMMR3_INT_DECL(int) PDMR3CritSectBothTerm(PVM pVM)
{
PUVM pUVM = pVM->pUVM;
int rc = VINF_SUCCESS;
@@ -104,6 +111,14 @@ VMMDECL(int) PDMR3CritSectTerm(PVM pVM)
rc = rc2;
}
+ while (pUVM->pdm.s.pRwCritSects)
+ {
+ int rc2 = pdmR3CritSectRwDeleteOne(pVM, pUVM, pUVM->pdm.s.pRwCritSects, NULL, true /* final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
return rc;
}
@@ -126,6 +141,7 @@ static int pdmR3CritSectInitOne(PVM pVM, PPDMCRITSECTINT pCritSect, void *pvKey,
const char *pszNameFmt, va_list va)
{
VM_ASSERT_EMT(pVM);
+ Assert(pCritSect->Core.u32Magic != RTCRITSECT_MAGIC);
/*
* Allocate the semaphore.
@@ -167,15 +183,21 @@ static int pdmR3CritSectInitOne(PVM pVM, PPDMCRITSECTINT pCritSect, void *pvKey,
pCritSect->fAutomaticDefaultCritsect = false;
pCritSect->fUsedByTimerOrSimilar = false;
pCritSect->EventToSignal = NIL_RTSEMEVENT;
- pCritSect->pNext = pVM->pUVM->pdm.s.pCritSects;
pCritSect->pszName = pszName;
- pVM->pUVM->pdm.s.pCritSects = pCritSect;
+
STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
#ifdef VBOX_WITH_STATISTICS
STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
#endif
+
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pCritSect->pNext = pUVM->pdm.s.pCritSects;
+ pUVM->pdm.s.pCritSects = pCritSect;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
return VINF_SUCCESS;
}
@@ -190,10 +212,113 @@ static int pdmR3CritSectInitOne(PVM pVM, PPDMCRITSECTINT pCritSect, void *pvKey,
/**
+ * Initializes a read/write critical section and inserts it into the list.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pCritSect The read/write critical section.
+ * @param pvKey The owner key.
+ * @param RT_SRC_POS_DECL The source position.
+ * @param pszName The name of the critical section (for statistics).
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+static int pdmR3CritSectRwInitOne(PVM pVM, PPDMCRITSECTRWINT pCritSect, void *pvKey, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ VM_ASSERT_EMT(pVM);
+ Assert(pCritSect->Core.u32Magic != RTCRITSECTRW_MAGIC);
+
+ /*
+ * Allocate the semaphores.
+ */
+ AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.hEvtWrite));
+ int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.hEvtWrite);
+ if (RT_SUCCESS(rc))
+ {
+ AssertCompile(sizeof(SUPSEMEVENTMULTI) == sizeof(pCritSect->Core.hEvtRead));
+ rc = SUPSemEventMultiCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.hEvtRead);
+ if (RT_SUCCESS(rc))
+ {
+ /* Only format the name once. */
+ char *pszName = RTStrAPrintf2V(pszNameFmt, va); /** @todo plug the "leak"... */
+ if (pszName)
+ {
+ pCritSect->Core.pValidatorRead = NULL;
+ pCritSect->Core.pValidatorWrite = NULL;
+#ifdef PDMCRITSECTRW_STRICT
+# ifdef RT_LOCK_STRICT_ORDER
+ RTLOCKVALCLASS hClass = RTLockValidatorClassForSrcPos(RT_SRC_POS_ARGS, "%s", pszName);
+# else
+ RTLOCKVALCLASS hClass = NIL_RTLOCKVALCLASS;
+# endif
+ rc = RTLockValidatorRecExclCreate(&pCritSect->Core.pValidatorWrite, hClass, RTLOCKVAL_SUB_CLASS_NONE,
+ pCritSect, true, "%s", pszName);
+ if (RT_SUCCESS(rc))
+ rc = RTLockValidatorRecSharedCreate(&pCritSect->Core.pValidatorRead, hClass, RTLOCKVAL_SUB_CLASS_NONE,
+ pCritSect, false /*fSignaller*/, true, "%s", pszName);
+#endif
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the structure (first bit is c&p from RTCritSectRwInitEx).
+ */
+ pCritSect->Core.u32Magic = RTCRITSECTRW_MAGIC;
+ pCritSect->Core.fNeedReset = false;
+ pCritSect->Core.u64State = 0;
+ pCritSect->Core.hNativeWriter = NIL_RTNATIVETHREAD;
+ pCritSect->Core.cWriterReads = 0;
+ pCritSect->Core.cWriteRecursions = 0;
+#if HC_ARCH_BITS == 32
+ pCritSect->Core.HCPtrPadding = NIL_RTHCPTR;
+#endif
+ pCritSect->pVMR3 = pVM;
+ pCritSect->pVMR0 = pVM->pVMR0;
+ pCritSect->pVMRC = pVM->pVMRC;
+ pCritSect->pvKey = pvKey;
+ pCritSect->pszName = pszName;
+
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZEnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZEnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLeaveExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZLeaveExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZEnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZEnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLeaveShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionRZLeaveShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3EnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionR3EnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatContentionR3EnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/ContentionR3EnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatRZEnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/RZEnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatRZEnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/RZEnterShared", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatR3EnterExcl, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/R3EnterExcl", pCritSect->pszName);
+ STAMR3RegisterF(pVM, &pCritSect->StatR3EnterShared, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSectsRw/%s/R3EnterShared", pCritSect->pszName);
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3RegisterF(pVM, &pCritSect->StatWriteLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSectsRw/%s/WriteLocked", pCritSect->pszName);
+#endif
+
+ PUVM pUVM = pVM->pUVM;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ pCritSect->pNext = pUVM->pdm.s.pRwCritSects;
+ pUVM->pdm.s.pRwCritSects = pCritSect;
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+
+ return VINF_SUCCESS;
+ }
+
+ RTStrFree(pszName);
+ }
+ else
+ rc = VERR_NO_STR_MEMORY;
+ SUPSemEventMultiClose(pVM->pSession, (SUPSEMEVENT)pCritSect->Core.hEvtRead);
+ }
+ SUPSemEventClose(pVM->pSession, (SUPSEMEVENT)pCritSect->Core.hEvtWrite);
+ }
+ return rc;
+}
+
+
+/**
* Initializes a PDM critical section for internal use.
*
* The PDM critical sections are derived from the IPRT critical sections, but
- * works in GC as well.
+ * works in ring-0 and raw-mode context as well.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
@@ -203,7 +328,7 @@ static int pdmR3CritSectInitOne(PVM pVM, PPDMCRITSECTINT pCritSect, void *pvKey,
* @param pszNameFmt Format string for naming the critical section. For
* statistics and lock validation.
* @param ... Arguments for the format string.
- * @thread EMT(0)
+ * @thread EMT
*/
VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
@@ -220,10 +345,37 @@ VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, RT_SRC_POS_DEC
/**
- * Initializes a PDM critical section for a device.
+ * Initializes a PDM read/write critical section for internal use.
*
- * The PDM critical sections are derived from the IPRT critical sections, but
- * works in GC as well.
+ * The PDM read/write critical sections are derived from the IPRT read/write
+ * critical sections, but works in ring-0 and raw-mode context as well.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pDevIns Device instance.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param RT_SRC_POS_DECL Use RT_SRC_POS.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3CritSectRwInit(PVM pVM, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
+{
+#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
+ AssertCompile(sizeof(pCritSect->padding) >= sizeof(pCritSect->s));
+#endif
+ Assert(RT_ALIGN_P(pCritSect, sizeof(uintptr_t)) == pCritSect);
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pCritSect, RT_SRC_POS_ARGS, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Initializes a PDM critical section for a device.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
@@ -241,6 +393,24 @@ int pdmR3CritSectInitDevice(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECT pCritSect,
/**
+ * Initializes a PDM read/write critical section for a device.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pDevIns Device instance.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param va Arguments for the format string.
+ */
+int pdmR3CritSectRwInitDevice(PVM pVM, PPDMDEVINS pDevIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, va_list va)
+{
+ return pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pDevIns, RT_SRC_POS_ARGS, pszNameFmt, va);
+}
+
+
+/**
* Initializes the automatic default PDM critical section for a device.
*
* @returns VBox status code.
@@ -284,6 +454,28 @@ int pdmR3CritSectInitDriver(PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECT pCritSect,
/**
+ * Initializes a PDM read/write critical section for a driver.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ * @param pDrvIns Driver instance.
+ * @param pCritSect Pointer to the read/write critical section.
+ * @param pszNameFmt Format string for naming the critical section. For
+ * statistics and lock validation.
+ * @param ... Arguments for the format string.
+ */
+int pdmR3CritSectRwInitDriver(PVM pVM, PPDMDRVINS pDrvIns, PPDMCRITSECTRW pCritSect, RT_SRC_POS_DECL,
+ const char *pszNameFmt, ...)
+{
+ va_list va;
+ va_start(va, pszNameFmt);
+ int rc = pdmR3CritSectRwInitOne(pVM, &pCritSect->s, pDrvIns, RT_SRC_POS_ARGS, pszNameFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
* Deletes one critical section.
*
* @returns Return code from RTCritSectDelete.
@@ -332,22 +524,86 @@ static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect,
pCritSect->pVMR3 = NULL;
pCritSect->pVMR0 = NIL_RTR0PTR;
pCritSect->pVMRC = NIL_RTRCPTR;
+ if (!fFinal)
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/CritSects/%s/*", pCritSect->pszName);
RTStrFree((char *)pCritSect->pszName);
pCritSect->pszName = NULL;
- if (!fFinal)
- {
- STAMR3Deregister(pVM, &pCritSect->StatContentionRZLock);
- STAMR3Deregister(pVM, &pCritSect->StatContentionRZUnlock);
- STAMR3Deregister(pVM, &pCritSect->StatContentionR3);
-#ifdef VBOX_WITH_STATISTICS
- STAMR3Deregister(pVM, &pCritSect->StatLocked);
-#endif
- }
return rc;
}
/**
+ * Deletes one read/write critical section.
+ *
+ * @returns VBox status code.
+ *
+ * @param pVM Pointer to the VM.
+ * @param pCritSect The read/write critical section.
+ * @param pPrev The previous critical section in the list.
+ * @param fFinal Set if this is the final call and statistics shouldn't be deregistered.
+ *
+ * @remarks Caller must have entered the ListCritSect.
+ */
+static int pdmR3CritSectRwDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTRWINT pCritSect, PPDMCRITSECTRWINT pPrev, bool fFinal)
+{
+ /*
+ * Assert free waiters and so on (c&p from RTCritSectRwDelete).
+ */
+ Assert(pCritSect->Core.u32Magic == RTCRITSECTRW_MAGIC);
+ //Assert(pCritSect->Core.cNestings == 0);
+ //Assert(pCritSect->Core.cLockers == -1);
+ Assert(pCritSect->Core.hNativeWriter == NIL_RTNATIVETHREAD);
+
+ /*
+ * Invalidate the structure and free the semaphores.
+ */
+ if (!ASMAtomicCmpXchgU32(&pCritSect->Core.u32Magic, RTCRITSECTRW_MAGIC_DEAD, RTCRITSECTRW_MAGIC))
+ AssertFailed();
+
+ /*
+ * Unlink it.
+ */
+ if (pPrev)
+ pPrev->pNext = pCritSect->pNext;
+ else
+ pUVM->pdm.s.pRwCritSects = pCritSect->pNext;
+
+ /*
+ * Delete it (parts taken from RTCritSectRwDelete).
+ * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
+ */
+ pCritSect->Core.fFlags = 0;
+ pCritSect->Core.u64State = 0;
+
+ SUPSEMEVENT hEvtWrite = (SUPSEMEVENT)pCritSect->Core.hEvtWrite;
+ pCritSect->Core.hEvtWrite = NIL_RTSEMEVENT;
+ AssertCompile(sizeof(hEvtWrite) == sizeof(pCritSect->Core.hEvtWrite));
+
+ SUPSEMEVENTMULTI hEvtRead = (SUPSEMEVENTMULTI)pCritSect->Core.hEvtRead;
+ pCritSect->Core.hEvtRead = NIL_RTSEMEVENTMULTI;
+ AssertCompile(sizeof(hEvtRead) == sizeof(pCritSect->Core.hEvtRead));
+
+ int rc1 = SUPSemEventClose(pVM->pSession, hEvtWrite); AssertRC(rc1);
+ int rc2 = SUPSemEventMultiClose(pVM->pSession, hEvtRead); AssertRC(rc2);
+
+ RTLockValidatorRecSharedDestroy(&pCritSect->Core.pValidatorRead);
+ RTLockValidatorRecExclDestroy(&pCritSect->Core.pValidatorWrite);
+
+ pCritSect->pNext = NULL;
+ pCritSect->pvKey = NULL;
+ pCritSect->pVMR3 = NULL;
+ pCritSect->pVMR0 = NIL_RTR0PTR;
+ pCritSect->pVMRC = NIL_RTRCPTR;
+ if (!fFinal)
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/CritSectsRw/%s/*", pCritSect->pszName);
+ RTStrFree((char *)pCritSect->pszName);
+ pCritSect->pszName = NULL;
+
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
+}
+
+
+/**
* Deletes all critical sections with a give initializer key.
*
* @returns VBox status code.
@@ -387,28 +643,73 @@ static int pdmR3CritSectDeleteByKey(PVM pVM, void *pvKey)
/**
- * Deletes all undeleted critical sections initialized by a given device.
+ * Deletes all read/write critical sections with a give initializer key.
+ *
+ * @returns VBox status code.
+ * The entire list is processed on failure, so we'll only
+ * return the first error code. This shouldn't be a problem
+ * since errors really shouldn't happen here.
+ * @param pVM Pointer to the VM.
+ * @param pvKey The initializer key.
+ */
+static int pdmR3CritSectRwDeleteByKey(PVM pVM, void *pvKey)
+{
+ /*
+ * Iterate the list and match key.
+ */
+ PUVM pUVM = pVM->pUVM;
+ int rc = VINF_SUCCESS;
+ PPDMCRITSECTRWINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTRWINT pCur = pUVM->pdm.s.pRwCritSects;
+ while (pCur)
+ {
+ if (pCur->pvKey == pvKey)
+ {
+ int rc2 = pdmR3CritSectRwDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ AssertRC(rc2);
+ if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
+ rc = rc2;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+}
+
+
+/**
+ * Deletes all undeleted critical sections (both types) initialized by a given
+ * device.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pDevIns The device handle.
*/
-int pdmR3CritSectDeleteDevice(PVM pVM, PPDMDEVINS pDevIns)
+int pdmR3CritSectBothDeleteDevice(PVM pVM, PPDMDEVINS pDevIns)
{
- return pdmR3CritSectDeleteByKey(pVM, pDevIns);
+ int rc1 = pdmR3CritSectDeleteByKey(pVM, pDevIns);
+ int rc2 = pdmR3CritSectRwDeleteByKey(pVM, pDevIns);
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
}
/**
- * Deletes all undeleted critical sections initialized by a given driver.
+ * Deletes all undeleted critical sections (both types) initialized by a given
+ * driver.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pDrvIns The driver handle.
*/
-int pdmR3CritSectDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns)
+int pdmR3CritSectBothDeleteDriver(PVM pVM, PPDMDRVINS pDrvIns)
{
- return pdmR3CritSectDeleteByKey(pVM, pDrvIns);
+ int rc1 = pdmR3CritSectDeleteByKey(pVM, pDrvIns);
+ int rc2 = pdmR3CritSectRwDeleteByKey(pVM, pDrvIns);
+ return RT_SUCCESS(rc1) ? rc2 : rc1;
}
@@ -452,6 +753,45 @@ VMMR3DECL(int) PDMR3CritSectDelete(PPDMCRITSECT pCritSect)
/**
+ * Deletes the read/write critical section.
+ *
+ * @returns VBox status code.
+ * @param pCritSect The PDM read/write critical section to destroy.
+ */
+VMMR3DECL(int) PDMR3CritSectRwDelete(PPDMCRITSECTRW pCritSect)
+{
+ if (!PDMCritSectRwIsInitialized(pCritSect))
+ return VINF_SUCCESS;
+
+ /*
+ * Find and unlink it.
+ */
+ PVM pVM = pCritSect->s.pVMR3;
+ PUVM pUVM = pVM->pUVM;
+ AssertReleaseReturn(pVM, VERR_PDM_CRITSECT_IPE);
+ PPDMCRITSECTRWINT pPrev = NULL;
+ RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
+ PPDMCRITSECTRWINT pCur = pUVM->pdm.s.pRwCritSects;
+ while (pCur)
+ {
+ if (pCur == &pCritSect->s)
+ {
+ int rc = pdmR3CritSectRwDeleteOne(pVM, pUVM, pCur, pPrev, false /* not final */);
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ return rc;
+ }
+
+ /* next */
+ pPrev = pCur;
+ pCur = pCur->pNext;
+ }
+ RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
+ AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
+ return VERR_PDM_CRITSECT_NOT_FOUND;
+}
+
+
+/**
* Gets the name of the critical section.
*
*
@@ -468,6 +808,22 @@ VMMR3DECL(const char *) PDMR3CritSectName(PCPDMCRITSECT pCritSect)
/**
+ * Gets the name of the read/write critical section.
+ *
+ *
+ * @returns Pointer to the critical section name (read only) on success,
+ * NULL on failure (invalid critical section).
+ * @param pCritSect The read/write critical section.
+ */
+VMMR3DECL(const char *) PDMR3CritSectRwName(PCPDMCRITSECTRW pCritSect)
+{
+ AssertPtrReturn(pCritSect, NULL);
+ AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECTRW_MAGIC, NULL);
+ return pCritSect->s.pszName;
+}
+
+
+/**
* Yield the critical section if someone is waiting on it.
*
* When yielding, we'll leave the critical section and try to make sure the
@@ -556,8 +912,66 @@ VMMR3DECL(int) PDMR3CritSectScheduleExitEvent(PPDMCRITSECT pCritSect, RTSEMEVENT
/**
- * Counts the critical sections owned by the calling thread, optionally
- * returning a comma separated list naming them.
+ * PDMR3CritSectBothCountOwned worker.
+ *
+ * @param pszName The critical section name.
+ * @param ppszNames Pointer to the pszNames variable.
+ * @param pcchLeft Pointer to the cchLeft variable.
+ * @param fFirst Whether this is the first name or not.
+ */
+static void pdmR3CritSectAppendNameToList(char const *pszName, char **ppszNames, size_t *pcchLeft, bool fFirst)
+{
+ size_t cchLeft = *pcchLeft;
+ if (cchLeft)
+ {
+ char *pszNames = *ppszNames;
+
+ /* try add comma. */
+ if (fFirst)
+ {
+ *pszNames++ = ',';
+ if (--cchLeft)
+ {
+ *pszNames++ = ' ';
+ cchLeft--;
+ }
+ }
+
+ /* try copy the name. */
+ if (cchLeft)
+ {
+ size_t const cchName = strlen(pszName);
+ if (cchName < cchLeft)
+ {
+ memcpy(pszNames, pszName, cchName);
+ pszNames += cchName;
+ cchLeft -= cchName;
+ }
+ else
+ {
+ if (cchLeft > 2)
+ {
+ memcpy(pszNames, pszName, cchLeft - 2);
+ pszNames += cchLeft - 2;
+ cchLeft = 2;
+ }
+ while (cchLeft-- > 0)
+ *pszNames++ = '+';
+ }
+ }
+ *pszNames = '\0';
+
+ *pcchLeft = cchLeft;
+ *ppszNames = pszNames;
+ }
+}
+
+
+/**
+ * Counts the critical sections (both type) owned by the calling thread,
+ * optionally returning a comma separated list naming them.
+ *
+ * Read ownerships are not included in non-strict builds.
*
* This is for diagnostic purposes only.
*
@@ -582,9 +996,9 @@ VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNa
/*
* Iterate the critical sections.
*/
- /* This is unsafe, but wtf. */
- RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
uint32_t cCritSects = 0;
+ RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
+ /* This is unsafe, but wtf. */
for (PPDMCRITSECTINT pCur = pVM->pUVM->pdm.s.pCritSects;
pCur;
pCur = pCur->pNext)
@@ -593,47 +1007,20 @@ VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNa
if (pCur->Core.NativeThreadOwner == hNativeThread)
{
cCritSects++;
+ pdmR3CritSectAppendNameToList(pCur->pszName, &pszNames, &cchLeft, cCritSects == 1);
+ }
+ }
- /*
- * Copy the name if there is space. Fun stuff.
- */
- if (cchLeft)
- {
- /* try add comma. */
- if (cCritSects != 1)
- {
- *pszNames++ = ',';
- if (--cchLeft)
- {
- *pszNames++ = ' ';
- cchLeft--;
- }
- }
-
- /* try copy the name. */
- if (cchLeft)
- {
- size_t const cchName = strlen(pCur->pszName);
- if (cchName < cchLeft)
- {
- memcpy(pszNames, pCur->pszName, cchName);
- pszNames += cchName;
- cchLeft -= cchName;
- }
- else
- {
- if (cchLeft > 2)
- {
- memcpy(pszNames, pCur->pszName, cchLeft - 2);
- pszNames += cchLeft - 2;
- cchLeft = 2;
- }
- while (cchLeft-- > 0)
- *pszNames++ = '+';
- }
- }
- *pszNames = '\0';
- }
+ /* This is unsafe, but wtf. */
+ for (PPDMCRITSECTRWINT pCur = pVM->pUVM->pdm.s.pRwCritSects;
+ pCur;
+ pCur = pCur->pNext)
+ {
+ if ( pCur->Core.hNativeWriter == hNativeThread
+ || PDMCritSectRwIsReadOwner((PPDMCRITSECTRW)pCur, false /*fWannaHear*/) )
+ {
+ cCritSects++;
+ pdmR3CritSectAppendNameToList(pCur->pszName, &pszNames, &cchLeft, cCritSects == 1);
}
}
@@ -649,7 +1036,7 @@ VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNa
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) PDMR3CritSectLeaveAll(PVM pVM)
+VMMR3_INT_DECL(void) PDMR3CritSectLeaveAll(PVM pVM)
{
RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
PUVM pUVM = pVM->pUVM;
diff --git a/src/VBox/VMM/VMMR3/PDMDevHlp.cpp b/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
index 33853d07..fd56de98 100644
--- a/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
+++ b/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,6 +23,7 @@
#include "PDMInternal.h"
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#ifdef VBOX_WITH_REM
@@ -64,7 +65,13 @@
*/
DECLINLINE(int) pdmR3DevGetSymbolRCLazy(PPDMDEVINS pDevIns, const char *pszSymbol, PRTRCPTR ppvValue)
{
- return PDMR3LdrGetSymbolRCLazy(pDevIns->Internal.s.pVMR3,
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ if (HMIsEnabled(pVM))
+ {
+ *ppvValue = NIL_RTRCPTR;
+ return VINF_SUCCESS;
+ }
+ return PDMR3LdrGetSymbolRCLazy(pVM,
pDevIns->Internal.s.pDevR3->pReg->szRCMod,
pDevIns->Internal.s.pDevR3->pszRCSearchPath,
pszSymbol, ppvValue);
@@ -120,7 +127,8 @@ static DECLCALLBACK(int) pdmR3DevHlp_IOPortRegisterRC(PPDMDEVINS pDevIns, RTIOPO
const char *pszOutStr, const char *pszInStr, const char *pszDesc)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
LogFlow(("pdmR3DevHlp_IOPortRegisterRC: caller='%s'/%d: Port=%#x cPorts=%#x pvUser=%p pszOut=%p:{%s} pszIn=%p:{%s} pszOutStr=%p:{%s} pszInStr=%p:{%s} pszDesc=%p:{%s}\n", pDevIns->pReg->szName, pDevIns->iInstance,
Port, cPorts, pvUser, pszOut, pszOut, pszIn, pszIn, pszOutStr, pszOutStr, pszInStr, pszInStr, pszDesc, pszDesc));
@@ -128,8 +136,9 @@ static DECLCALLBACK(int) pdmR3DevHlp_IOPortRegisterRC(PPDMDEVINS pDevIns, RTIOPO
* Resolve the functions (one of the can be NULL).
*/
int rc = VINF_SUCCESS;
- if ( pDevIns->pReg->szRCMod[0]
- && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC))
+ if ( pDevIns->pReg->szRCMod[0]
+ && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ && !HMIsEnabled(pVM))
{
RTRCPTR RCPtrIn = NIL_RTRCPTR;
if (pszIn)
@@ -167,12 +176,12 @@ static DECLCALLBACK(int) pdmR3DevHlp_IOPortRegisterRC(PPDMDEVINS pDevIns, RTIOPO
}
#endif
- rc = IOMR3IOPortRegisterRC(pDevIns->Internal.s.pVMR3, pDevIns, Port, cPorts, pvUser, RCPtrOut, RCPtrIn, RCPtrOutStr, RCPtrInStr, pszDesc);
+ rc = IOMR3IOPortRegisterRC(pVM, pDevIns, Port, cPorts, pvUser, RCPtrOut, RCPtrIn, RCPtrOutStr, RCPtrInStr, pszDesc);
}
}
- else
+ else if (!HMIsEnabled(pVM))
{
- AssertMsgFailed(("No GC module for this driver!\n"));
+ AssertMsgFailed(("No RC module for this driver!\n"));
rc = VERR_INVALID_PARAMETER;
}
@@ -294,7 +303,8 @@ static DECLCALLBACK(int) pdmR3DevHlp_MMIORegisterRC(PPDMDEVINS pDevIns, RTGCPHYS
const char *pszWrite, const char *pszRead, const char *pszFill)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
LogFlow(("pdmR3DevHlp_MMIORegisterRC: caller='%s'/%d: GCPhysStart=%RGp cbRange=%#x pvUser=%p pszWrite=%p:{%s} pszRead=%p:{%s} pszFill=%p:{%s}\n",
pDevIns->pReg->szName, pDevIns->iInstance, GCPhysStart, cbRange, pvUser, pszWrite, pszWrite, pszRead, pszRead, pszFill, pszFill));
@@ -304,8 +314,9 @@ static DECLCALLBACK(int) pdmR3DevHlp_MMIORegisterRC(PPDMDEVINS pDevIns, RTGCPHYS
* Not all function have to present, leave it to IOM to enforce this.
*/
int rc = VINF_SUCCESS;
- if ( pDevIns->pReg->szRCMod[0]
- && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC))
+ if ( pDevIns->pReg->szRCMod[0]
+ && (pDevIns->pReg->fFlags & PDM_DEVREG_FLAGS_RC)
+ && !HMIsEnabled(pVM))
{
RTRCPTR RCPtrWrite = NIL_RTRCPTR;
if (pszWrite)
@@ -322,7 +333,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_MMIORegisterRC(PPDMDEVINS pDevIns, RTGCPHYS
rc3 = pdmR3DevGetSymbolRCLazy(pDevIns, pszFill, &RCPtrFill);
if (RT_SUCCESS(rc) && RT_SUCCESS(rc2) && RT_SUCCESS(rc3))
- rc = IOMR3MmioRegisterRC(pDevIns->Internal.s.pVMR3, pDevIns, GCPhysStart, cbRange, pvUser, RCPtrWrite, RCPtrRead, RCPtrFill);
+ rc = IOMR3MmioRegisterRC(pVM, pDevIns, GCPhysStart, cbRange, pvUser, RCPtrWrite, RCPtrRead, RCPtrFill);
else
{
AssertMsgRC(rc, ("Failed to resolve %s.%s (pszWrite)\n", pDevIns->pReg->szRCMod, pszWrite));
@@ -334,9 +345,9 @@ static DECLCALLBACK(int) pdmR3DevHlp_MMIORegisterRC(PPDMDEVINS pDevIns, RTGCPHYS
rc = rc3;
}
}
- else
+ else if (!HMIsEnabled(pVM))
{
- AssertMsgFailed(("No GC module for this driver!\n"));
+ AssertMsgFailed(("No RC module for this driver!\n"));
rc = VERR_INVALID_PARAMETER;
}
@@ -440,7 +451,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_MMIO2Deregister(PPDMDEVINS pDevIns, uint32_
LogFlow(("pdmR3DevHlp_MMIO2Deregister: caller='%s'/%d: iRegion=%#x\n",
pDevIns->pReg->szName, pDevIns->iInstance, iRegion));
- AssertReturn(iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
+ AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
int rc = PGMR3PhysMMIO2Deregister(pDevIns->Internal.s.pVMR3, pDevIns, iRegion);
@@ -664,7 +675,7 @@ static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMTimeVirtGetNano(PPDMDEVINS pDevIns)
LogFlow(("pdmR3DevHlp_TMTimeVirtGetNano: caller='%s'\n",
pDevIns->pReg->szName, pDevIns->iInstance));
- uint64_t u64Time = TMVirtualSyncGet(pDevIns->Internal.s.pVMR3);
+ uint64_t u64Time = TMVirtualGet(pDevIns->Internal.s.pVMR3);
uint64_t u64Nano = TMVirtualToNano(pDevIns->Internal.s.pVMR3, u64Time);
LogFlow(("pdmR3DevHlp_TMTimeVirtGetNano: caller='%s'/%d: returns %RU64\n", pDevIns->pReg->szName, pDevIns->iInstance, u64Nano));
@@ -672,6 +683,20 @@ static DECLCALLBACK(uint64_t) pdmR3DevHlp_TMTimeVirtGetNano(PPDMDEVINS pDevIns)
}
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetSupDrvSession} */
+static DECLCALLBACK(PSUPDRVSESSION) pdmR3DevHlp_GetSupDrvSession(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_GetSupDrvSession: caller='%s'\n",
+ pDevIns->pReg->szName, pDevIns->iInstance));
+
+ PSUPDRVSESSION pSession = pDevIns->Internal.s.pVMR3->pSession;
+
+ LogFlow(("pdmR3DevHlp_GetSupDrvSession: caller='%s'/%d: returns %#p\n", pDevIns->pReg->szName, pDevIns->iInstance, pSession));
+ return pSession;
+}
+
+
/** @interface_method_impl{PDMDEVHLPR3,pfnPhysRead} */
static DECLCALLBACK(int) pdmR3DevHlp_PhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
@@ -1009,6 +1034,22 @@ static DECLCALLBACK(int) pdmR3DevHlp_DBGFInfoRegister(PPDMDEVINS pDevIns, const
}
+/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFRegRegister} */
+static DECLCALLBACK(int) pdmR3DevHlp_DBGFRegRegister(PPDMDEVINS pDevIns, PCDBGFREGDESC paRegisters)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_DBGFRegRegister: caller='%s'/%d: paRegisters=%p\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, paRegisters));
+
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ int rc = DBGFR3RegRegisterDevice(pVM, paRegisters, pDevIns, pDevIns->pReg->szName, pDevIns->iInstance);
+
+ LogFlow(("pdmR3DevHlp_DBGFRegRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+ return rc;
+}
+
+
/** @interface_method_impl{PDMDEVHLPR3,pfnDBGFTraceBuf} */
static DECLCALLBACK(RTTRACEBUF) pdmR3DevHlp_DBGFTraceBuf(PPDMDEVINS pDevIns)
{
@@ -1065,22 +1106,6 @@ static DECLCALLBACK(void) pdmR3DevHlp_STAMRegisterV(PPDMDEVINS pDevIns, void *pv
}
-/** @interface_method_impl{PDMDEVHLPR3,pfnPCIDevPhysRead} */
-static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
-{
- PDMDEV_ASSERT_DEVINS(pDevIns);
- return PDMDevHlpPCIDevPhysRead(pDevIns->Internal.s.pPciDeviceR3, GCPhys, pvBuf, cbRead);
-}
-
-
-/** @interface_method_impl{PDMDEVHLPR3,pfnPCIDevPhysWrite} */
-static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
-{
- PDMDEV_ASSERT_DEVINS(pDevIns);
- return PDMDevHlpPCIDevPhysWrite(pDevIns->Internal.s.pPciDeviceR3, GCPhys, pvBuf, cbWrite);
-}
-
-
/** @interface_method_impl{PDMDEVHLPR3,pfnPCIRegister} */
static DECLCALLBACK(int) pdmR3DevHlp_PCIRegister(PPDMDEVINS pDevIns, PPCIDEVICE pPciDev)
{
@@ -1351,6 +1376,54 @@ static DECLCALLBACK(void) pdmR3DevHlp_PCISetConfigCallbacks(PPDMDEVINS pDevIns,
}
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysRead} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysRead(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic read helper.
+ */
+ PPCIDEVICE pPciDev = pDevIns->Internal.s.pPciDeviceR3;
+ AssertReleaseMsg(pPciDev, ("No PCI device registered!\n"));
+
+ if (!PCIDevIsBusmaster(pPciDev))
+ {
+ Log(("pdmR3DevHlp_PCIPhysRead: caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbRead=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbRead));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnPCIPhysRead} */
+static DECLCALLBACK(int) pdmR3DevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+
+#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
+ /*
+ * Just check the busmaster setting here and forward the request to the generic read helper.
+ */
+ PPCIDEVICE pPciDev = pDevIns->Internal.s.pPciDeviceR3;
+ AssertReleaseMsg(pPciDev, ("No PCI device registered!\n"));
+
+ if (!PCIDevIsBusmaster(pPciDev))
+ {
+ Log(("pdmR3DevHlp_PCIPhysWrite: caller='%s'/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite));
+ return VERR_PDM_NOT_PCI_BUS_MASTER;
+ }
+#endif
+
+ return pDevIns->pHlpR3->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
+}
+
+
/** @interface_method_impl{PDMDEVHLPR3,pfnPCISetIrq} */
static DECLCALLBACK(void) pdmR3DevHlp_PCISetIrq(PPDMDEVINS pDevIns, int iIrq, int iLevel)
{
@@ -1424,10 +1497,10 @@ static DECLCALLBACK(int) pdmR3DevHlp_PCIRegisterMsi(PPDMDEVINS pDevIns, PPDMMSIR
PVM pVM = pDevIns->Internal.s.pVMR3;
pdmLock(pVM);
- if (!pBus->pfnRegisterMsiR3)
- rc = VERR_NOT_IMPLEMENTED;
- else
+ if (pBus->pfnRegisterMsiR3)
rc = pBus->pfnRegisterMsiR3(pBus->pDevInsR3, pPciDev, pMsiReg);
+ else
+ rc = VERR_NOT_IMPLEMENTED;
pdmUnlock(pVM);
}
else
@@ -1961,7 +2034,7 @@ static DECLCALLBACK(void) pdmR3DevHlp_DMASchedule(PPDMDEVINS pDevIns)
PVM pVM = pDevIns->Internal.s.pVMR3;
VM_ASSERT_EMT(pVM);
LogFlow(("pdmR3DevHlp_DMASchedule: caller='%s'/%d: VM_FF_PDM_DMA %d -> 1\n",
- pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_ISSET(pVM, VM_FF_PDM_DMA)));
+ pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
AssertMsg(pVM->pdm.s.pDmac, ("Configuration error: No DMAC controller available. This could be related to init order too!\n"));
VM_FF_SET(pVM, VM_FF_PDM_DMA);
@@ -1983,7 +2056,15 @@ static DECLCALLBACK(int) pdmR3DevHlp_CMOSWrite(PPDMDEVINS pDevIns, unsigned iReg
pDevIns->pReg->szName, pDevIns->iInstance, iReg, u8Value));
int rc;
if (pVM->pdm.s.pRtc)
- rc = pVM->pdm.s.pRtc->Reg.pfnWrite(pVM->pdm.s.pRtc->pDevIns, iReg, u8Value);
+ {
+ PPDMDEVINS pDevInsRtc = pVM->pdm.s.pRtc->pDevIns;
+ rc = PDMCritSectEnter(pDevInsRtc->pCritSectRoR3, VERR_IGNORED);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pVM->pdm.s.pRtc->Reg.pfnWrite(pDevInsRtc, iReg, u8Value);
+ PDMCritSectLeave(pDevInsRtc->pCritSectRoR3);
+ }
+ }
else
rc = VERR_PDM_NO_RTC_INSTANCE;
@@ -2004,7 +2085,15 @@ static DECLCALLBACK(int) pdmR3DevHlp_CMOSRead(PPDMDEVINS pDevIns, unsigned iReg,
pDevIns->pReg->szName, pDevIns->iInstance, iReg, pu8Value));
int rc;
if (pVM->pdm.s.pRtc)
- rc = pVM->pdm.s.pRtc->Reg.pfnRead(pVM->pdm.s.pRtc->pDevIns, iReg, pu8Value);
+ {
+ PPDMDEVINS pDevInsRtc = pVM->pdm.s.pRtc->pDevIns;
+ rc = PDMCritSectEnter(pDevInsRtc->pCritSectRoR3, VERR_IGNORED);
+ if (RT_SUCCESS(rc))
+ {
+ rc = pVM->pdm.s.pRtc->Reg.pfnRead(pDevInsRtc, iReg, pu8Value);
+ PDMCritSectLeave(pDevInsRtc->pCritSectRoR3);
+ }
+ }
else
rc = VERR_PDM_NO_RTC_INSTANCE;
@@ -2125,7 +2214,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_CallR0(PPDMDEVINS pDevIns, uint32_t uOperat
{
PDMDEV_ASSERT_DEVINS(pDevIns);
PVM pVM = pDevIns->Internal.s.pVMR3;
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ VM_ASSERT_EMT(pVM);
LogFlow(("pdmR3DevHlp_CallR0: caller='%s'/%d: uOperation=%#x u64Arg=%#RX64\n",
pDevIns->pReg->szName, pDevIns->iInstance, uOperation, u64Arg));
@@ -2169,6 +2258,41 @@ static DECLCALLBACK(int) pdmR3DevHlp_CallR0(PPDMDEVINS pDevIns, uint32_t uOperat
}
+/** @interface_method_impl{PDMDEVHLP,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3DevHlp_VMGetSuspendReason(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3DevHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDEVHLP,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3DevHlp_VMGetResumeReason(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3DevHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pDevIns->pReg->szName, pDevIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetUVM} */
+static DECLCALLBACK(PUVM) pdmR3DevHlp_GetUVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ LogFlow(("pdmR3DevHlp_GetUVM: caller='%s'/%d: returns %p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns->Internal.s.pVMR3));
+ return pDevIns->Internal.s.pVMR3->pUVM;
+}
+
+
/** @interface_method_impl{PDMDEVHLPR3,pfnGetVM} */
static DECLCALLBACK(PVM) pdmR3DevHlp_GetVM(PPDMDEVINS pDevIns)
{
@@ -2194,11 +2318,11 @@ static DECLCALLBACK(int) pdmR3DevHlp_PCIBusRegister(PPDMDEVINS pDevIns, PPDMPCIB
PDMDEV_ASSERT_DEVINS(pDevIns);
PVM pVM = pDevIns->Internal.s.pVMR3;
VM_ASSERT_EMT(pVM);
- LogFlow(("pdmR3DevHlp_PCIBusRegister: caller='%s'/%d: pPciBusReg=%p:{.u32Version=%#x, .pfnRegisterR3=%p, .pfnIORegionRegisterR3=%p, .pfnSetIrqR3=%p, "
- ".pfnSaveExecR3=%p, .pfnLoadExecR3=%p, .pfnFakePCIBIOSR3=%p, .pszSetIrqRC=%p:{%s}, .pszSetIrqR0=%p:{%s}} ppPciHlpR3=%p\n",
+ LogFlow(("pdmR3DevHlp_PCIBusRegister: caller='%s'/%d: pPciBusReg=%p:{.u32Version=%#x, .pfnRegisterR3=%p, .pfnIORegionRegisterR3=%p, "
+ ".pfnSetIrqR3=%p, .pfnFakePCIBIOSR3=%p, .pszSetIrqRC=%p:{%s}, .pszSetIrqR0=%p:{%s}} ppPciHlpR3=%p\n",
pDevIns->pReg->szName, pDevIns->iInstance, pPciBusReg, pPciBusReg->u32Version, pPciBusReg->pfnRegisterR3,
- pPciBusReg->pfnIORegionRegisterR3, pPciBusReg->pfnSetIrqR3, pPciBusReg->pfnSaveExecR3, pPciBusReg->pfnLoadExecR3,
- pPciBusReg->pfnFakePCIBIOSR3, pPciBusReg->pszSetIrqRC, pPciBusReg->pszSetIrqRC, pPciBusReg->pszSetIrqR0, pPciBusReg->pszSetIrqR0, ppPciHlpR3));
+ pPciBusReg->pfnIORegionRegisterR3, pPciBusReg->pfnSetIrqR3, pPciBusReg->pfnFakePCIBIOSR3,
+ pPciBusReg->pszSetIrqRC, pPciBusReg->pszSetIrqRC, pPciBusReg->pszSetIrqR0, pPciBusReg->pszSetIrqR0, ppPciHlpR3));
/*
* Validate the structure.
@@ -2212,15 +2336,11 @@ static DECLCALLBACK(int) pdmR3DevHlp_PCIBusRegister(PPDMDEVINS pDevIns, PPDMPCIB
if ( !pPciBusReg->pfnRegisterR3
|| !pPciBusReg->pfnIORegionRegisterR3
|| !pPciBusReg->pfnSetIrqR3
- || !pPciBusReg->pfnSaveExecR3
- || !pPciBusReg->pfnLoadExecR3
|| (!pPciBusReg->pfnFakePCIBIOSR3 && !pVM->pdm.s.aPciBuses[0].pDevInsR3)) /* Only the first bus needs to do the BIOS work. */
{
Assert(pPciBusReg->pfnRegisterR3);
Assert(pPciBusReg->pfnIORegionRegisterR3);
Assert(pPciBusReg->pfnSetIrqR3);
- Assert(pPciBusReg->pfnSaveExecR3);
- Assert(pPciBusReg->pfnLoadExecR3);
Assert(pPciBusReg->pfnFakePCIBIOSR3);
LogFlow(("pdmR3DevHlp_PCIBusRegister: caller='%s'/%d: returns %Rrc (R3 callbacks)\n", pDevIns->pReg->szName, pDevIns->iInstance, VERR_INVALID_PARAMETER));
return VERR_INVALID_PARAMETER;
@@ -2311,8 +2431,6 @@ static DECLCALLBACK(int) pdmR3DevHlp_PCIBusRegister(PPDMDEVINS pDevIns, PPDMPCIB
pPciBus->pfnIORegionRegisterR3 = pPciBusReg->pfnIORegionRegisterR3;
pPciBus->pfnSetConfigCallbacksR3 = pPciBusReg->pfnSetConfigCallbacksR3;
pPciBus->pfnSetIrqR3 = pPciBusReg->pfnSetIrqR3;
- pPciBus->pfnSaveExecR3 = pPciBusReg->pfnSaveExecR3;
- pPciBus->pfnLoadExecR3 = pPciBusReg->pfnLoadExecR3;
pPciBus->pfnFakePCIBIOSR3 = pPciBusReg->pfnFakePCIBIOSR3;
Log(("PDM: Registered PCI bus device '%s'/%d pDevIns=%p\n", pDevIns->pReg->szName, pDevIns->iInstance, pDevIns));
@@ -3099,7 +3217,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_RegisterVMMDevHeap(PPDMDEVINS pDevIns, RTGC
PDMDEV_ASSERT_DEVINS(pDevIns);
VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- int rc = PDMR3RegisterVMMDevHeap(pDevIns->Internal.s.pVMR3, GCPhys, pvHeap, cbSize);
+ int rc = PDMR3VmmDevHeapRegister(pDevIns->Internal.s.pVMR3, GCPhys, pvHeap, cbSize);
return rc;
}
@@ -3112,7 +3230,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_UnregisterVMMDevHeap(PPDMDEVINS pDevIns, RT
PDMDEV_ASSERT_DEVINS(pDevIns);
VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- int rc = PDMR3UnregisterVMMDevHeap(pDevIns->Internal.s.pVMR3, GCPhys);
+ int rc = PDMR3VmmDevHeapUnregister(pDevIns->Internal.s.pVMR3, GCPhys);
return rc;
}
@@ -3124,7 +3242,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_VMReset(PPDMDEVINS pDevIns)
PVM pVM = pDevIns->Internal.s.pVMR3;
VM_ASSERT_EMT(pVM);
LogFlow(("pdmR3DevHlp_VMReset: caller='%s'/%d: VM_FF_RESET %d -> 1\n",
- pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_ISSET(pVM, VM_FF_RESET)));
+ pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_IS_SET(pVM, VM_FF_RESET)));
/*
* We postpone this operation because we're likely to be inside a I/O instruction
@@ -3163,12 +3281,12 @@ static DECLCALLBACK(int) pdmR3DevHlp_VMSuspend(PPDMDEVINS pDevIns)
if (pVM->cCpus > 1)
{
/* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
- rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3Suspend, 1, pVM);
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3Suspend, 2, pVM->pUVM, VMSUSPENDREASON_VM);
AssertRC(rc);
rc = VINF_EM_SUSPEND;
}
else
- rc = VMR3Suspend(pVM);
+ rc = VMR3Suspend(pVM->pUVM, VMSUSPENDREASON_VM);
LogFlow(("pdmR3DevHlp_VMSuspend: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
return rc;
@@ -3188,7 +3306,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_VMSuspendSaveAndPowerOffWorker(PVM pVM, PPD
/*
* Suspend the VM first then do the saving.
*/
- int rc = VMR3Suspend(pVM);
+ int rc = VMR3Suspend(pVM->pUVM, VMSUSPENDREASON_VM);
if (RT_SUCCESS(rc))
{
PUVM pUVM = pVM->pUVM;
@@ -3199,7 +3317,7 @@ static DECLCALLBACK(int) pdmR3DevHlp_VMSuspendSaveAndPowerOffWorker(PVM pVM, PPD
*/
if (RT_SUCCESS(rc))
{
- rc = VMR3PowerOff(pVM);
+ rc = VMR3PowerOff(pVM->pUVM);
if (RT_FAILURE(rc))
LogRel(("%s/SSP: VMR3PowerOff failed: %Rrc\n", pDevIns->pReg->szName, rc));
}
@@ -3253,17 +3371,17 @@ static DECLCALLBACK(int) pdmR3DevHlp_VMPowerOff(PPDMDEVINS pDevIns)
/** @todo Always take the SMP path - fewer code paths. */
if (pVM->cCpus > 1)
{
- /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
- rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3PowerOff, 1, pVM);
+ /* We might be holding locks here and could cause a deadlock since
+ VMR3PowerOff rendezvous with the other CPUs. */
+ rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3PowerOff, 1, pVM->pUVM);
AssertRC(rc);
/* Set the VCPU state to stopped here as well to make sure no
- * inconsistency with the EM state occurs.
- */
+ inconsistency with the EM state occurs. */
VMCPU_SET_STATE(VMMGetCpu(pVM), VMCPUSTATE_STOPPED);
rc = VINF_EM_OFF;
}
else
- rc = VMR3PowerOff(pVM);
+ rc = VMR3PowerOff(pVM->pUVM);
LogFlow(("pdmR3DevHlp_VMPowerOff: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
return rc;
@@ -3355,16 +3473,17 @@ const PDMDEVHLPR3 g_pdmR3DevHlpTrusted =
pdmR3DevHlp_VMSetRuntimeErrorV,
pdmR3DevHlp_DBGFStopV,
pdmR3DevHlp_DBGFInfoRegister,
+ pdmR3DevHlp_DBGFRegRegister,
pdmR3DevHlp_DBGFTraceBuf,
pdmR3DevHlp_STAMRegister,
pdmR3DevHlp_STAMRegisterF,
pdmR3DevHlp_STAMRegisterV,
- pdmR3DevHlp_PCIPhysRead,
- pdmR3DevHlp_PCIPhysWrite,
pdmR3DevHlp_PCIRegister,
pdmR3DevHlp_PCIRegisterMsi,
pdmR3DevHlp_PCIIORegionRegister,
pdmR3DevHlp_PCISetConfigCallbacks,
+ pdmR3DevHlp_PCIPhysRead,
+ pdmR3DevHlp_PCIPhysWrite,
pdmR3DevHlp_PCISetIrq,
pdmR3DevHlp_PCISetIrqNoWait,
pdmR3DevHlp_ISASetIrq,
@@ -3400,6 +3519,8 @@ const PDMDEVHLPR3 g_pdmR3DevHlpTrusted =
pdmR3DevHlp_LdrGetRCInterfaceSymbols,
pdmR3DevHlp_LdrGetR0InterfaceSymbols,
pdmR3DevHlp_CallR0,
+ pdmR3DevHlp_VMGetSuspendReason,
+ pdmR3DevHlp_VMGetResumeReason,
0,
0,
0,
@@ -3407,9 +3528,7 @@ const PDMDEVHLPR3 g_pdmR3DevHlpTrusted =
0,
0,
0,
- 0,
- 0,
- 0,
+ pdmR3DevHlp_GetUVM,
pdmR3DevHlp_GetVM,
pdmR3DevHlp_GetVMCPU,
pdmR3DevHlp_RegisterVMMDevHeap,
@@ -3424,12 +3543,22 @@ const PDMDEVHLPR3 g_pdmR3DevHlpTrusted =
pdmR3DevHlp_TMTimeVirtGet,
pdmR3DevHlp_TMTimeVirtGetFreq,
pdmR3DevHlp_TMTimeVirtGetNano,
+ pdmR3DevHlp_GetSupDrvSession,
PDM_DEVHLPR3_VERSION /* the end */
};
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetUVM} */
+static DECLCALLBACK(PUVM) pdmR3DevHlp_Untrusted_GetUVM(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return NULL;
+}
+
+
/** @interface_method_impl{PDMDEVHLPR3,pfnGetVM} */
static DECLCALLBACK(PVM) pdmR3DevHlp_Untrusted_GetVM(PPDMDEVINS pDevIns)
{
@@ -3532,6 +3661,15 @@ static DECLCALLBACK(void) pdmR3DevHlp_Untrusted_GetCpuId(PPDMDEVINS pDevIns, uin
}
+/** @interface_method_impl{PDMDEVHLPR3,pfnGetSupDrvSession} */
+static DECLCALLBACK(PSUPDRVSESSION) pdmR3DevHlp_Untrusted_GetSupDrvSession(PPDMDEVINS pDevIns)
+{
+ PDMDEV_ASSERT_DEVINS(pDevIns);
+ AssertReleaseMsgFailed(("Untrusted device called trusted helper! '%s'/%d\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ return (PSUPDRVSESSION)0;
+}
+
+
/**
* The device helper structure for non-trusted devices.
*/
@@ -3576,16 +3714,17 @@ const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted =
pdmR3DevHlp_VMSetRuntimeErrorV,
pdmR3DevHlp_DBGFStopV,
pdmR3DevHlp_DBGFInfoRegister,
+ pdmR3DevHlp_DBGFRegRegister,
pdmR3DevHlp_DBGFTraceBuf,
pdmR3DevHlp_STAMRegister,
pdmR3DevHlp_STAMRegisterF,
pdmR3DevHlp_STAMRegisterV,
- pdmR3DevHlp_PCIPhysRead,
- pdmR3DevHlp_PCIPhysWrite,
pdmR3DevHlp_PCIRegister,
pdmR3DevHlp_PCIRegisterMsi,
pdmR3DevHlp_PCIIORegionRegister,
pdmR3DevHlp_PCISetConfigCallbacks,
+ pdmR3DevHlp_PCIPhysRead,
+ pdmR3DevHlp_PCIPhysWrite,
pdmR3DevHlp_PCISetIrq,
pdmR3DevHlp_PCISetIrqNoWait,
pdmR3DevHlp_ISASetIrq,
@@ -3621,6 +3760,8 @@ const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted =
pdmR3DevHlp_LdrGetRCInterfaceSymbols,
pdmR3DevHlp_LdrGetR0InterfaceSymbols,
pdmR3DevHlp_CallR0,
+ pdmR3DevHlp_VMGetSuspendReason,
+ pdmR3DevHlp_VMGetResumeReason,
0,
0,
0,
@@ -3628,9 +3769,7 @@ const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted =
0,
0,
0,
- 0,
- 0,
- 0,
+ pdmR3DevHlp_Untrusted_GetUVM,
pdmR3DevHlp_Untrusted_GetVM,
pdmR3DevHlp_Untrusted_GetVMCPU,
pdmR3DevHlp_Untrusted_RegisterVMMDevHeap,
@@ -3645,6 +3784,7 @@ const PDMDEVHLPR3 g_pdmR3DevHlpUnTrusted =
pdmR3DevHlp_TMTimeVirtGet,
pdmR3DevHlp_TMTimeVirtGetFreq,
pdmR3DevHlp_TMTimeVirtGetNano,
+ pdmR3DevHlp_Untrusted_GetSupDrvSession,
PDM_DEVHLPR3_VERSION /* the end */
};
diff --git a/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp b/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
index 21ec52ce..c6cadd82 100644
--- a/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
+++ b/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,6 +23,7 @@
#include "PDMInternal.h"
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/hm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
@@ -63,7 +64,7 @@ static DECLCALLBACK(void) pdmR3PicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */
LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
- pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
+ pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
@@ -91,7 +92,7 @@ static DECLCALLBACK(void) pdmR3PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
}
LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
- pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
+ pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
@@ -120,11 +121,17 @@ static DECLCALLBACK(void) pdmR3PicHlp_Unlock(PPDMDEVINS pDevIns)
static DECLCALLBACK(PCPDMPICHLPRC) pdmR3PicHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- RTRCPTR pRCHelpers = 0;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCPicHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCPicHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3PicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -135,9 +142,10 @@ static DECLCALLBACK(PCPDMPICHLPRC) pdmR3PicHlp_GetRCHelpers(PPDMDEVINS pDevIns)
static DECLCALLBACK(PCPDMPICHLPR0) pdmR3PicHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMPICHLPR0 pR0Helpers = 0;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0PicHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0PicHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3PicHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
@@ -180,7 +188,7 @@ static DECLCALLBACK(void) pdmR3ApicHlp_SetInterruptFF(PPDMDEVINS pDevIns, PDMAPI
AssertReturnVoid(idCpu < pVM->cCpus);
LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 1\n",
- pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
+ pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
switch (enmType)
{
@@ -217,7 +225,7 @@ static DECLCALLBACK(void) pdmR3ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMA
AssertReturnVoid(idCpu < pVM->cCpus);
LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
- pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
+ pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
/* Note: NMI/SMI can't be cleared. */
switch (enmType)
@@ -315,11 +323,17 @@ static DECLCALLBACK(void) pdmR3ApicHlp_SendInitIpi(PPDMDEVINS pDevIns, VMCPUID i
static DECLCALLBACK(PCPDMAPICHLPRC) pdmR3ApicHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- RTRCPTR pRCHelpers = 0;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCApicHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCApicHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3ApicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -330,9 +344,10 @@ static DECLCALLBACK(PCPDMAPICHLPRC) pdmR3ApicHlp_GetRCHelpers(PPDMDEVINS pDevIns
static DECLCALLBACK(PCPDMAPICHLPR0) pdmR3ApicHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMAPICHLPR0 pR0Helpers = 0;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0ApicHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0ApicHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3ApicHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
@@ -439,11 +454,17 @@ static DECLCALLBACK(void) pdmR3IoApicHlp_Unlock(PPDMDEVINS pDevIns)
static DECLCALLBACK(PCPDMIOAPICHLPRC) pdmR3IoApicHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- RTRCPTR pRCHelpers = 0;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCIoApicHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCIoApicHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3IoApicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -454,9 +475,10 @@ static DECLCALLBACK(PCPDMIOAPICHLPRC) pdmR3IoApicHlp_GetRCHelpers(PPDMDEVINS pDe
static DECLCALLBACK(PCPDMIOAPICHLPR0) pdmR3IoApicHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMIOAPICHLPR0 pR0Helpers = 0;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0IoApicHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0IoApicHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3IoApicHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
@@ -546,11 +568,17 @@ static DECLCALLBACK(void) pdmR3PciHlp_Unlock(PPDMDEVINS pDevIns)
static DECLCALLBACK(PCPDMPCIHLPRC) pdmR3PciHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- RTRCPTR pRCHelpers = 0;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCPciHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCPciHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3IoApicHlp_GetGCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -561,9 +589,10 @@ static DECLCALLBACK(PCPDMPCIHLPRC) pdmR3PciHlp_GetRCHelpers(PPDMDEVINS pDevIns)
static DECLCALLBACK(PCPDMPCIHLPR0) pdmR3PciHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMPCIHLPR0 pR0Helpers = 0;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0PciHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0PciHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3IoApicHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
@@ -614,7 +643,7 @@ static DECLCALLBACK(int) pdmR3HpetHlp_SetLegacyMode(PPDMDEVINS pDevIns, bool fAc
for (i = 0; i < RT_ELEMENTS(s_apszDevsToNotify); i++)
{
PPDMIBASE pBase;
- rc = PDMR3QueryDevice(pDevIns->Internal.s.pVMR3, "i8254", 0, &pBase);
+ rc = PDMR3QueryDevice(pDevIns->Internal.s.pVMR3->pUVM, "i8254", 0, &pBase);
if (RT_SUCCESS(rc))
{
PPDMIHPETLEGACYNOTIFY pPort = PDMIBASE_QUERY_INTERFACE(pBase, PDMIHPETLEGACYNOTIFY);
@@ -668,11 +697,17 @@ static DECLCALLBACK(int) pdmR3HpetHlp_SetIrq(PPDMDEVINS pDevIns, int iIrq, int i
static DECLCALLBACK(PCPDMHPETHLPRC) pdmR3HpetHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
- RTRCPTR pRCHelpers = 0;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCHpetHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
+ RTRCPTR pRCHelpers = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCHpetHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3HpetHlp_GetGCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -683,9 +718,10 @@ static DECLCALLBACK(PCPDMHPETHLPRC) pdmR3HpetHlp_GetRCHelpers(PPDMDEVINS pDevIns
static DECLCALLBACK(PCPDMHPETHLPR0) pdmR3HpetHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMHPETHLPR0 pR0Helpers = 0;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0HpetHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0HpetHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3HpetHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
@@ -718,11 +754,17 @@ const PDMHPETHLPR3 g_pdmR3DevHpetHlp =
static DECLCALLBACK(PCPDMPCIRAWHLPRC) pdmR3PciRawHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+
RTRCPTR pRCHelpers = NIL_RTRCPTR;
- int rc = PDMR3LdrGetSymbolRC(pDevIns->Internal.s.pVMR3, NULL, "g_pdmRCPciRawHlp", &pRCHelpers);
- AssertReleaseRC(rc);
- AssertRelease(pRCHelpers);
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCPciRawHlp", &pRCHelpers);
+ AssertReleaseRC(rc);
+ AssertRelease(pRCHelpers);
+ }
+
LogFlow(("pdmR3PciRawHlp_GetGCHelpers: caller='%s'/%d: returns %RRv\n",
pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
return pRCHelpers;
@@ -733,9 +775,10 @@ static DECLCALLBACK(PCPDMPCIRAWHLPRC) pdmR3PciRawHlp_GetRCHelpers(PPDMDEVINS pDe
static DECLCALLBACK(PCPDMPCIRAWHLPR0) pdmR3PciRawHlp_GetR0Helpers(PPDMDEVINS pDevIns)
{
PDMDEV_ASSERT_DEVINS(pDevIns);
- VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
+ PVM pVM = pDevIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
PCPDMHPETHLPR0 pR0Helpers = NIL_RTR0PTR;
- int rc = PDMR3LdrGetSymbolR0(pDevIns->Internal.s.pVMR3, NULL, "g_pdmR0PciRawHlp", &pR0Helpers);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0PciRawHlp", &pR0Helpers);
AssertReleaseRC(rc);
AssertRelease(pR0Helpers);
LogFlow(("pdmR3PciRawHlp_GetR0Helpers: caller='%s'/%d: returns %RHv\n",
diff --git a/src/VBox/VMM/VMMR3/PDMDevice.cpp b/src/VBox/VMM/VMMR3/PDMDevice.cpp
index 23c39ccb..e7e33ceb 100644
--- a/src/VBox/VMM/VMMR3/PDMDevice.cpp
+++ b/src/VBox/VMM/VMMR3/PDMDevice.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -25,12 +25,14 @@
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/cfgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/version.h>
@@ -128,9 +130,12 @@ int pdmR3DevInit(PVM pVM)
/*
* Get the RC & R0 devhlps and create the devhlp R3 task queue.
*/
- PCPDMDEVHLPRC pHlpRC;
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pHlpRC);
- AssertReleaseRCReturn(rc, rc);
+ PCPDMDEVHLPRC pHlpRC = NIL_RTRCPTR;
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDevHlp", &pHlpRC);
+ AssertReleaseRCReturn(rc, rc);
+ }
PCPDMDEVHLPR0 pHlpR0;
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0DevHlp", &pHlpR0);
@@ -352,7 +357,7 @@ int pdmR3DevInit(PVM pVM)
? MMHyperR3ToR0(pVM, pCritSect) : NIL_RTR0PTR;
rc = pdmR3CritSectInitDeviceAuto(pVM, pDevIns, pCritSect, RT_SRC_POS,
- "%s#%u Auto", pDevIns->pReg->szName, pDevIns->iInstance);
+ "%s#%uAuto", pDevIns->pReg->szName, pDevIns->iInstance);
AssertLogRelRCReturn(rc, rc);
/*
@@ -775,7 +780,7 @@ int pdmR3DevFindLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned
* This is used to change drivers and suchlike at runtime.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
@@ -783,8 +788,11 @@ int pdmR3DevFindLun(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned
* @param ppBase Where to store the base interface pointer. Optional.
* @thread EMT
*/
-VMMR3DECL(int) PDMR3DeviceAttach(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
+VMMR3DECL(int) PDMR3DeviceAttach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VM_ASSERT_EMT(pVM);
LogFlow(("PDMR3DeviceAttach: pszDevice=%p:{%s} iInstance=%d iLun=%d fFlags=%#x ppBase=%p\n",
pszDevice, pszDevice, iInstance, iLun, fFlags, ppBase));
@@ -834,16 +842,16 @@ VMMR3DECL(int) PDMR3DeviceAttach(PVM pVM, const char *pszDevice, unsigned iInsta
* This is used to change drivers and suchlike at runtime.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
* @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
* @thread EMT
*/
-VMMR3DECL(int) PDMR3DeviceDetach(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags)
+VMMR3DECL(int) PDMR3DeviceDetach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags)
{
- return PDMR3DriverDetach(pVM, pszDevice, iInstance, iLun, NULL, 0, fFlags);
+ return PDMR3DriverDetach(pUVM, pszDevice, iInstance, iLun, NULL, 0, fFlags);
}
@@ -879,7 +887,7 @@ VMMR3_INT_DECL(PPDMCRITSECT) PDMR3DevGetCritSect(PVM pVM, PPDMDEVINS pDevIns)
* below it.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iInstance Device instance.
* @param iLun The Logical Unit to obtain the interface of.
@@ -888,11 +896,14 @@ VMMR3_INT_DECL(PPDMCRITSECT) PDMR3DevGetCritSect(PVM pVM, PPDMDEVINS pDevIns)
*
* @thread EMT
*/
-VMMR3DECL(int) PDMR3DriverAttach(PVM pVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
+VMMR3DECL(int) PDMR3DriverAttach(PUVM pUVM, const char *pszDevice, unsigned iInstance, unsigned iLun, uint32_t fFlags, PPPDMIBASE ppBase)
{
- VM_ASSERT_EMT(pVM);
LogFlow(("PDMR3DriverAttach: pszDevice=%p:{%s} iInstance=%d iLun=%d fFlags=%#x ppBase=%p\n",
pszDevice, pszDevice, iInstance, iLun, fFlags, ppBase));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT(pVM);
if (ppBase)
*ppBase = NULL;
@@ -961,7 +972,7 @@ VMMR3DECL(int) PDMR3DriverAttach(PVM pVM, const char *pszDevice, unsigned iInsta
* pfnDetach callback (PDMDRVREG / PDMDEVREG).
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iDevIns Device instance.
* @param iLun The Logical Unit in which to look for the driver.
@@ -972,11 +983,14 @@ VMMR3DECL(int) PDMR3DriverAttach(PVM pVM, const char *pszDevice, unsigned iInsta
* @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
* @thread EMT
*/
-VMMR3DECL(int) PDMR3DriverDetach(PVM pVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
+VMMR3DECL(int) PDMR3DriverDetach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
const char *pszDriver, unsigned iOccurance, uint32_t fFlags)
{
LogFlow(("PDMR3DriverDetach: pszDevice=%p:{%s} iDevIns=%u iLun=%u pszDriver=%p:{%s} iOccurance=%u fFlags=%#x\n",
pszDevice, pszDevice, iDevIns, iLun, pszDriver, iOccurance, fFlags));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VM_ASSERT_EMT(pVM);
AssertPtr(pszDevice);
AssertPtrNull(pszDriver);
@@ -1032,7 +1046,7 @@ VMMR3DECL(int) PDMR3DriverDetach(PVM pVM, const char *pszDevice, unsigned iDevIn
* thread.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszDevice Device name.
* @param iDevIns Device instance.
* @param iLun The Logical Unit in which to look for the driver.
@@ -1053,11 +1067,11 @@ VMMR3DECL(int) PDMR3DriverDetach(PVM pVM, const char *pszDevice, unsigned iDevIn
*
* @thread Any thread. The EMTs will be involved at some point though.
*/
-VMMR3DECL(int) PDMR3DriverReattach(PVM pVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
+VMMR3DECL(int) PDMR3DriverReattach(PUVM pUVM, const char *pszDevice, unsigned iDevIns, unsigned iLun,
const char *pszDriver, unsigned iOccurance, uint32_t fFlags,
PCFGMNODE pCfg, PPPDMIBASE ppBase)
{
- NOREF(pVM); NOREF(pszDevice); NOREF(iDevIns); NOREF(iLun); NOREF(pszDriver); NOREF(iOccurance);
+ NOREF(pUVM); NOREF(pszDevice); NOREF(iDevIns); NOREF(iLun); NOREF(pszDriver); NOREF(iOccurance);
NOREF(fFlags); NOREF(pCfg); NOREF(ppBase);
return VERR_NOT_IMPLEMENTED;
}
diff --git a/src/VBox/VMM/VMMR3/PDMDriver.cpp b/src/VBox/VMM/VMMR3/PDMDriver.cpp
index f971b2e2..80e4efc0 100644
--- a/src/VBox/VMM/VMMR3/PDMDriver.cpp
+++ b/src/VBox/VMM/VMMR3/PDMDriver.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -24,6 +24,7 @@
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/vm.h>
@@ -701,7 +702,7 @@ int pdmR3DrvInstantiate(PVM pVM, PCFGMNODE pNode, PPDMIBASE pBaseInterface, PPDM
pNew->Internal.s.pVMR0 = pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_R0 ? pVM->pVMR0 : NIL_RTR0PTR;
pNew->Internal.s.pVMRC = pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_RC ? pVM->pVMRC : NIL_RTRCPTR;
//pNew->Internal.s.fDetaching = false;
- pNew->Internal.s.fVMSuspended = true;
+ pNew->Internal.s.fVMSuspended = true; /** @todo: should be 'false', if driver is attached at runtime. */
//pNew->Internal.s.fVMReset = false;
pNew->Internal.s.fHyperHeap = fHyperHeap;
//pNew->Internal.s.pfnAsyncNotify = NULL;
@@ -721,9 +722,9 @@ int pdmR3DrvInstantiate(PVM pVM, PCFGMNODE pNode, PPDMIBASE pBaseInterface, PPDM
pNew->pvInstanceDataR0 = MMHyperR3ToR0(pVM, &pNew->achInstanceData[0]);
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "g_pdmR0DrvHlp", &pNew->pHlpR0);
AssertReleaseRCReturn(rc, rc);
-
}
- if (pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+ if ( (pDrv->pReg->fFlags & PDM_DRVREG_FLAGS_RC)
+ && !HMIsEnabled(pVM))
{
pNew->pvInstanceDataR0 = MMHyperR3ToRC(pVM, &pNew->achInstanceData[0]);
rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCDrvHlp", &pNew->pHlpRC);
@@ -961,12 +962,17 @@ void pdmR3DrvDestroyChain(PPDMDRVINS pDrvIns, uint32_t fFlags)
AssertRC(rc);
/* PDM critsects. */
- rc = pdmR3CritSectDeleteDriver(pVM, pCur);
+ rc = pdmR3CritSectBothDeleteDriver(pVM, pCur);
AssertRC(rc);
/* Block caches. */
PDMR3BlkCacheReleaseDriver(pVM, pCur);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ /* Completion templates.*/
+ pdmR3AsyncCompletionTemplateDestroyDriver(pVM, pCur);
+#endif
+
/* Finally, the driver it self. */
bool fHyperHeap = pCur->Internal.s.fHyperHeap;
ASMMemFill32(pCur, RT_OFFSETOF(PDMDRVINS, achInstanceData[pCur->pReg->cbInstance]), 0xdeadd0d0);
@@ -1420,7 +1426,7 @@ static DECLCALLBACK(int) pdmR3DrvHlp_STAMDeregister(PPDMDRVINS pDrvIns, void *pv
PDMDRV_ASSERT_DRVINS(pDrvIns);
VM_ASSERT_EMT(pDrvIns->Internal.s.pVMR3);
- int rc = STAMR3DeregisterU(pDrvIns->Internal.s.pVMR3->pUVM, pvSample);
+ int rc = STAMR3DeregisterByAddr(pDrvIns->Internal.s.pVMR3->pUVM, pvSample);
AssertRC(rc);
return rc;
}
@@ -1544,7 +1550,7 @@ static DECLCALLBACK(int) pdmR3DrvHlp_AsyncCompletionTemplateCreate(PPDMDRVINS pD
LogFlow(("pdmR3DrvHlp_AsyncCompletionTemplateCreate: caller='%s'/%d: ppTemplate=%p pfnCompleted=%p pszDesc=%p:{%s}\n",
pDrvIns->pReg->szName, pDrvIns->iInstance, ppTemplate, pfnCompleted, pszDesc, pszDesc));
- int rc = PDMR3AsyncCompletionTemplateCreateDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, ppTemplate, pfnCompleted, pvTemplateUser, pszDesc);
+ int rc = pdmR3AsyncCompletionTemplateCreateDriver(pDrvIns->Internal.s.pVMR3, pDrvIns, ppTemplate, pfnCompleted, pvTemplateUser, pszDesc);
LogFlow(("pdmR3DrvHlp_AsyncCompletionTemplateCreate: caller='%s'/%d: returns %Rrc *ppThread=%p\n", pDrvIns->pReg->szName,
pDrvIns->iInstance, rc, *ppTemplate));
@@ -1560,7 +1566,7 @@ static DECLCALLBACK(int) pdmR3DrvHlp_NetShaperAttach(PPDMDRVINS pDrvIns, const c
LogFlow(("pdmR3DrvHlp_NetShaperAttach: caller='%s'/%d: pFilter=%p pszBwGroup=%p:{%s}\n",
pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter, pszBwGroup, pszBwGroup));
- int rc = PDMR3NsAttach(pDrvIns->Internal.s.pVMR3, pDrvIns, pszBwGroup, pFilter);
+ int rc = PDMR3NsAttach(pDrvIns->Internal.s.pVMR3->pUVM, pDrvIns, pszBwGroup, pFilter);
LogFlow(("pdmR3DrvHlp_NetShaperAttach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
pDrvIns->iInstance, rc));
@@ -1575,7 +1581,7 @@ static DECLCALLBACK(int) pdmR3DrvHlp_NetShaperDetach(PPDMDRVINS pDrvIns, PPDMNSF
LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: pFilter=%p\n",
pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter));
- int rc = PDMR3NsDetach(pDrvIns->Internal.s.pVMR3, pDrvIns, pFilter);
+ int rc = PDMR3NsDetach(pDrvIns->Internal.s.pVMR3->pUVM, pDrvIns, pFilter);
LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
pDrvIns->iInstance, rc));
@@ -1751,6 +1757,33 @@ static DECLCALLBACK(int) pdmR3DrvHlp_BlkCacheRetain(PPDMDRVINS pDrvIns, PPPDMBLK
}
+
+/** @interface_method_impl{PDMDRVHLP,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3DrvHlp_VMGetSuspendReason(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3DrvHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMDRVHLP,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3DrvHlp_VMGetResumeReason(PPDMDRVINS pDrvIns)
+{
+ PDMDRV_ASSERT_DRVINS(pDrvIns);
+ PVM pVM = pDrvIns->Internal.s.pVMR3;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3DrvHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pDrvIns->pReg->szName, pDrvIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
/**
* The driver helper structure.
*/
@@ -1798,6 +1831,18 @@ const PDMDRVHLPR3 g_pdmR3DrvHlp =
pdmR3DrvHlp_CallR0,
pdmR3DrvHlp_FTSetCheckpoint,
pdmR3DrvHlp_BlkCacheRetain,
+ pdmR3DrvHlp_VMGetSuspendReason,
+ pdmR3DrvHlp_VMGetResumeReason,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
PDM_DRVHLPR3_VERSION /* u32TheEnd */
};
diff --git a/src/VBox/VMM/VMMR3/PDMLdr.cpp b/src/VBox/VMM/VMMR3/PDMLdr.cpp
index 2e1969ac..34fbbbba 100644
--- a/src/VBox/VMM/VMMR3/PDMLdr.cpp
+++ b/src/VBox/VMM/VMMR3/PDMLdr.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -31,7 +31,7 @@
#include <VBox/sup.h>
#include <VBox/param.h>
#include <VBox/err.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/VBoxTpG.h>
#include <VBox/log.h>
@@ -77,7 +77,7 @@ static char *pdmR3File(const char *pszFile, const char *pszDefaultExt, const
* @returns VBox status code.
* @param pUVM Pointer to the user mode VM structure.
*/
-VMMR3DECL(int) PDMR3LdrLoadVMMR0U(PUVM pUVM)
+VMMR3_INT_DECL(int) PDMR3LdrLoadVMMR0U(PUVM pUVM)
{
return pdmR3LoadR0U(pUVM, NULL, VMMR0_MAIN_MODULE_NAME, NULL);
}
@@ -95,16 +95,15 @@ VMMR3DECL(int) PDMR3LdrLoadVMMR0U(PUVM pUVM)
*/
int pdmR3LdrInitU(PUVM pUVM)
{
-#if defined(PDMLDR_FAKE_MODE) || !defined(VBOX_WITH_RAW_MODE)
- return VINF_SUCCESS;
-
-#else
-
+#if !defined(PDMLDR_FAKE_MODE) && defined(VBOX_WITH_RAW_MODE)
/*
- * Load the mandatory GC module, the VMMR0.r0 is loaded before VM creation.
+ * Load the mandatory RC module, the VMMR0.r0 is loaded before VM creation.
*/
- return PDMR3LdrLoadRC(pUVM->pVM, NULL, VMMGC_MAIN_MODULE_NAME);
+ PVM pVM = pUVM->pVM; AssertPtr(pVM);
+ if (!HMIsEnabled(pVM))
+ return PDMR3LdrLoadRC(pVM, NULL, VMMGC_MAIN_MODULE_NAME);
#endif
+ return VINF_SUCCESS;
}
@@ -177,7 +176,7 @@ void pdmR3LdrTermU(PUVM pUVM)
* @param pUVM Pointer to the user mode VM structure.
* @param offDelta Relocation delta relative to old location.
*/
-VMMR3DECL(void) PDMR3LdrRelocateU(PUVM pUVM, RTGCINTPTR offDelta)
+VMMR3_INT_DECL(void) PDMR3LdrRelocateU(PUVM pUVM, RTGCINTPTR offDelta)
{
#ifdef VBOX_WITH_RAW_MODE
LogFlow(("PDMR3LdrRelocate: offDelta=%RGv\n", offDelta));
@@ -216,8 +215,6 @@ VMMR3DECL(void) PDMR3LdrRelocateU(PUVM pUVM, RTGCINTPTR offDelta)
int rc = RTLdrRelocate(pCur->hLdrMod, pCur->pvBits, pCur->ImageBase, pCur->OldImageBase,
pdmR3GetImportRC, &Args);
AssertFatalMsgRC(rc, ("RTLdrRelocate failed, rc=%d\n", rc));
- DBGFR3ModuleRelocate(pUVM->pVM, pCur->OldImageBase, pCur->ImageBase, RTLdrSize(pCur->hLdrMod),
- pCur->szFilename, pCur->szName);
}
}
}
@@ -319,8 +316,8 @@ int pdmR3LoadR3U(PUVM pUVM, const char *pszFilename, const char *pszName)
return rc;
}
-
#ifdef VBOX_WITH_RAW_MODE
+
/**
* Resolve an external symbol during RTLdrGetBits() of a RC module.
*
@@ -451,6 +448,8 @@ VMMR3DECL(int) PDMR3LdrLoadRC(PVM pVM, const char *pszFilename, const char *pszN
* Validate input.
*/
AssertMsg(PDMCritSectIsInitialized(&pVM->pdm.s.CritSect), ("bad init order!\n"));
+ AssertReturn(!HMIsEnabled(pVM), VERR_PDM_HM_IPE);
+
PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
PPDMMOD pCur = pUVM->pdm.s.pModules;
@@ -608,8 +607,8 @@ VMMR3DECL(int) PDMR3LdrLoadRC(PVM pVM, const char *pszFilename, const char *pszN
RTMemTmpFree(pszFile);
return rc;
}
-#endif /* VBOX_WITH_RAW_MODE */
+#endif /* VBOX_WITH_RAW_MODE */
/**
* Loads a module into the ring-0 context.
@@ -719,7 +718,7 @@ static int pdmR3LoadR0U(PUVM pUVM, const char *pszFilename, const char *pszName,
* ordinal value rather than a string pointer.
* @param ppvValue Where to store the symbol value.
*/
-VMMR3DECL(int) PDMR3LdrGetSymbolR3(PVM pVM, const char *pszModule, const char *pszSymbol, void **ppvValue)
+VMMR3_INT_DECL(int) PDMR3LdrGetSymbolR3(PVM pVM, const char *pszModule, const char *pszSymbol, void **ppvValue)
{
/*
* Validate input.
@@ -883,7 +882,8 @@ VMMR3DECL(int) PDMR3LdrGetSymbolR0Lazy(PVM pVM, const char *pszModule, const cha
VMMR3DECL(int) PDMR3LdrGetSymbolRC(PVM pVM, const char *pszModule, const char *pszSymbol, PRTRCPTR pRCPtrValue)
{
#if defined(PDMLDR_FAKE_MODE) || !defined(VBOX_WITH_RAW_MODE)
- *pRCPtrValue = 0xfeedf00d;
+ Assert(!HMIsEnabled(pVM));
+ *pRCPtrValue = NIL_RTRCPTR;
return VINF_SUCCESS;
#else
@@ -950,7 +950,8 @@ VMMR3DECL(int) PDMR3LdrGetSymbolRCLazy(PVM pVM, const char *pszModule, const cha
PRTRCPTR pRCPtrValue)
{
#if defined(PDMLDR_FAKE_MODE) || !defined(VBOX_WITH_RAW_MODE)
- *pRCPtrValue = 0xfeedf00d;
+ Assert(!HMIsEnabled(pVM));
+ *pRCPtrValue = NIL_RTRCPTR;
return VINF_SUCCESS;
#else
@@ -1358,10 +1359,10 @@ static int pdmR3LdrQueryModFromPC(PVM pVM, RTUINTPTR uPC, PDMMODTYPE enmType,
* @param cchNearSym2 Size of the buffer pointed to by pszNearSym2.
* @param pNearSym2 The address of pszNearSym2.
*/
-VMMR3DECL(int) PDMR3LdrQueryRCModFromPC(PVM pVM, RTRCPTR uPC,
- char *pszModName, size_t cchModName, PRTRCPTR pMod,
- char *pszNearSym1, size_t cchNearSym1, PRTRCPTR pNearSym1,
- char *pszNearSym2, size_t cchNearSym2, PRTRCPTR pNearSym2)
+VMMR3_INT_DECL(int) PDMR3LdrQueryRCModFromPC(PVM pVM, RTRCPTR uPC,
+ char *pszModName, size_t cchModName, PRTRCPTR pMod,
+ char *pszNearSym1, size_t cchNearSym1, PRTRCPTR pNearSym1,
+ char *pszNearSym2, size_t cchNearSym2, PRTRCPTR pNearSym2)
{
RTUINTPTR AddrMod = 0;
RTUINTPTR AddrNear1 = 0;
@@ -1402,10 +1403,10 @@ VMMR3DECL(int) PDMR3LdrQueryRCModFromPC(PVM pVM, RTRCPTR uPC,
* @param cchNearSym2 Size of the buffer pointed to by pszNearSym2. Optional.
* @param pNearSym2 The address of pszNearSym2. Optional.
*/
-VMMR3DECL(int) PDMR3LdrQueryR0ModFromPC(PVM pVM, RTR0PTR uPC,
- char *pszModName, size_t cchModName, PRTR0PTR pMod,
- char *pszNearSym1, size_t cchNearSym1, PRTR0PTR pNearSym1,
- char *pszNearSym2, size_t cchNearSym2, PRTR0PTR pNearSym2)
+VMMR3_INT_DECL(int) PDMR3LdrQueryR0ModFromPC(PVM pVM, RTR0PTR uPC,
+ char *pszModName, size_t cchModName, PRTR0PTR pMod,
+ char *pszNearSym1, size_t cchNearSym1, PRTR0PTR pNearSym1,
+ char *pszNearSym2, size_t cchNearSym2, PRTR0PTR pNearSym2)
{
RTUINTPTR AddrMod = 0;
RTUINTPTR AddrNear1 = 0;
@@ -1447,7 +1448,10 @@ VMMR3DECL(int) PDMR3LdrEnumModules(PVM pVM, PFNPDMR3ENUM pfnCallback, void *pvA
pCur->szName,
pCur->ImageBase,
pCur->eType == PDMMOD_TYPE_RC ? RTLdrSize(pCur->hLdrMod) : 0,
- pCur->eType == PDMMOD_TYPE_RC,
+ pCur->eType == PDMMOD_TYPE_RC ? PDMLDRCTX_RAW_MODE
+ : pCur->eType == PDMMOD_TYPE_R0 ? PDMLDRCTX_RING_0
+ : pCur->eType == PDMMOD_TYPE_R3 ? PDMLDRCTX_RING_3
+ : PDMLDRCTX_INVALID,
pvArg);
if (RT_FAILURE(rc))
break;
@@ -1551,20 +1555,24 @@ static PPDMMOD pdmR3LdrFindModule(PUVM pUVM, const char *pszModule, PDMMODTYPE e
* @param fRing0 Set if it's a ring-0 context interface, clear if
* it's raw-mode context interface.
*/
-VMMR3DECL(int) PDMR3LdrGetInterfaceSymbols(PVM pVM, void *pvInterface, size_t cbInterface,
- const char *pszModule, const char *pszSearchPath,
- const char *pszSymPrefix, const char *pszSymList,
- bool fRing0)
+VMMR3_INT_DECL(int) PDMR3LdrGetInterfaceSymbols(PVM pVM, void *pvInterface, size_t cbInterface,
+ const char *pszModule, const char *pszSearchPath,
+ const char *pszSymPrefix, const char *pszSymList,
+ bool fRing0)
{
+ bool const fNullRun = !fRing0 && HMIsEnabled(pVM);
+
/*
* Find the module.
*/
int rc = VINF_SUCCESS;
- PPDMMOD pModule = pdmR3LdrFindModule(pVM->pUVM,
- pszModule ? pszModule : fRing0 ? "VMMR0.r0" : "VMMGC.gc",
- fRing0 ? PDMMOD_TYPE_R0 : PDMMOD_TYPE_RC,
- true /*fLazy*/, pszSearchPath);
- if (pModule)
+ PPDMMOD pModule = NULL;
+ if (!fNullRun)
+ pModule = pdmR3LdrFindModule(pVM->pUVM,
+ pszModule ? pszModule : fRing0 ? "VMMR0.r0" : "VMMGC.gc",
+ fRing0 ? PDMMOD_TYPE_R0 : PDMMOD_TYPE_RC,
+ true /*fLazy*/, pszSearchPath);
+ if (pModule || fNullRun)
{
/* Prep the symbol name. */
char szSymbol[256];
@@ -1644,9 +1652,12 @@ VMMR3DECL(int) PDMR3LdrGetInterfaceSymbols(PVM pVM, void *pvInterface, size_t cb
if (fRing0)
{
- void *pvValue;
- rc = SUPR3GetSymbolR0((void *)(RTR0PTR)pModule->ImageBase, szSymbol, &pvValue);
- AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ void *pvValue = NULL;
+ if (!fNullRun)
+ {
+ rc = SUPR3GetSymbolR0((void *)(RTR0PTR)pModule->ImageBase, szSymbol, &pvValue);
+ AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ }
PRTR0PTR pValue = (PRTR0PTR)((uintptr_t)pvInterface + offInterface);
AssertMsgBreakStmt(offInterface + sizeof(*pValue) <= cbInterface,
@@ -1658,9 +1669,12 @@ VMMR3DECL(int) PDMR3LdrGetInterfaceSymbols(PVM pVM, void *pvInterface, size_t cb
}
else
{
- RTUINTPTR Value;
- rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, szSymbol, &Value);
- AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ RTUINTPTR Value = 0;
+ if (!fNullRun)
+ {
+ rc = RTLdrGetSymbolEx(pModule->hLdrMod, pModule->pvBits, pModule->ImageBase, szSymbol, &Value);
+ AssertMsgRCBreak(rc, ("Couldn't find symbol '%s' in module '%s'\n", szSymbol, pModule->szName));
+ }
PRTRCPTR pValue = (PRTRCPTR)((uintptr_t)pvInterface + offInterface);
AssertMsgBreakStmt(offInterface + sizeof(*pValue) <= cbInterface,
diff --git a/src/VBox/VMM/VMMR3/PDMNetShaper.cpp b/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
index bdc8e31c..48fc435b 100644
--- a/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
+++ b/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2011-2012 Oracle Corporation
+ * Copyright (C) 2011-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -41,7 +41,7 @@
#include <iprt/string.h>
#include <VBox/vmm/pdmnetshaper.h>
-#include <VBox/vmm/pdmnetshaperint.h>
+#include "PDMNetShaperInternal.h"
/*******************************************************************************
@@ -56,32 +56,42 @@ typedef struct PDMNETSHAPER
/** Pointer to the VM. */
PVM pVM;
/** Critical section protecting all members below. */
- RTCRITSECT cs;
+ RTCRITSECT Lock;
/** Pending TX thread. */
- PPDMTHREAD hTxThread;
+ PPDMTHREAD pTxThread;
/** Pointer to the first bandwidth group. */
PPDMNSBWGROUP pBwGroupsHead;
} PDMNETSHAPER;
+/** Takes the shaper lock (asserts but doesn't return or anything on
+ * failure). */
+#define LOCK_NETSHAPER(a_pShaper) do { int rcShaper = RTCritSectEnter(&(a_pShaper)->Lock); AssertRC(rcShaper); } while (0)
+/** Takes the shaper lock, returns + asserts on failure. */
+#define LOCK_NETSHAPER_RETURN(a_pShaper) \
+ do { int rcShaper = RTCritSectEnter(&(a_pShaper)->Lock); AssertRCReturn(rcShaper, rcShaper); } while (0)
+/** Releases the shaper lock (asserts on failure). */
+#define UNLOCK_NETSHAPER(a_pShaper) do { int rcShaper = RTCritSectLeave(&(a_pShaper)->Lock); AssertRC(rcShaper); } while (0)
-static PPDMNSBWGROUP pdmNsBwGroupFindById(PPDMNETSHAPER pShaper, const char *pcszId)
+
+
+static PPDMNSBWGROUP pdmNsBwGroupFindById(PPDMNETSHAPER pShaper, const char *pszId)
{
PPDMNSBWGROUP pBwGroup = NULL;
- if (RT_VALID_PTR(pcszId))
+ if (RT_VALID_PTR(pszId))
{
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
+ LOCK_NETSHAPER(pShaper);
pBwGroup = pShaper->pBwGroupsHead;
while ( pBwGroup
- && RTStrCmp(pBwGroup->pszName, pcszId))
- pBwGroup = pBwGroup->pNext;
+ && RTStrCmp(pBwGroup->pszNameR3, pszId))
+ pBwGroup = pBwGroup->pNextR3;
- rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
+ UNLOCK_NETSHAPER(pShaper);
}
return pBwGroup;
@@ -90,13 +100,13 @@ static PPDMNSBWGROUP pdmNsBwGroupFindById(PPDMNETSHAPER pShaper, const char *pcs
static void pdmNsBwGroupLink(PPDMNSBWGROUP pBwGroup)
{
- PPDMNETSHAPER pShaper = pBwGroup->pShaper;
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
+ PPDMNETSHAPER pShaper = pBwGroup->pShaperR3;
+ LOCK_NETSHAPER(pShaper);
- pBwGroup->pNext = pShaper->pBwGroupsHead;
+ pBwGroup->pNextR3 = pShaper->pBwGroupsHead;
pShaper->pBwGroupsHead = pBwGroup;
- rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
+ UNLOCK_NETSHAPER(pShaper);
}
@@ -104,7 +114,7 @@ static void pdmNsBwGroupLink(PPDMNSBWGROUP pBwGroup)
static void pdmNsBwGroupUnlink(PPDMNSBWGROUP pBwGroup)
{
PPDMNETSHAPER pShaper = pBwGroup->pShaper;
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
+ LOCK_NETSHAPER(pShaper);
if (pBwGroup == pShaper->pBwGroupsHead)
pShaper->pBwGroupsHead = pBwGroup->pNext;
@@ -119,58 +129,56 @@ static void pdmNsBwGroupUnlink(PPDMNSBWGROUP pBwGroup)
pPrev->pNext = pBwGroup->pNext;
}
- rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
+ UNLOCK_NETSHAPER(pShaper);
}
#endif
-static void pdmNsBwGroupSetLimit(PPDMNSBWGROUP pBwGroup, uint64_t cbTransferPerSecMax)
+static void pdmNsBwGroupSetLimit(PPDMNSBWGROUP pBwGroup, uint64_t cbPerSecMax)
{
- pBwGroup->cbTransferPerSecMax = cbTransferPerSecMax;
- pBwGroup->cbBucketSize = RT_MAX(PDM_NETSHAPER_MIN_BUCKET_SIZE,
- cbTransferPerSecMax * PDM_NETSHAPER_MAX_LATENCY / 1000);
- LogFlowFunc(("New rate limit is %llu bytes per second, adjusted bucket size to %d bytes\n",
- pBwGroup->cbTransferPerSecMax, pBwGroup->cbBucketSize));
+ pBwGroup->cbPerSecMax = cbPerSecMax;
+ pBwGroup->cbBucket = RT_MAX(PDM_NETSHAPER_MIN_BUCKET_SIZE, cbPerSecMax * PDM_NETSHAPER_MAX_LATENCY / 1000);
+ LogFlow(("pdmNsBwGroupSetLimit: New rate limit is %llu bytes per second, adjusted bucket size to %u bytes\n",
+ pBwGroup->cbPerSecMax, pBwGroup->cbBucket));
}
-static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
+static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pszBwGroup, uint64_t cbPerSecMax)
{
- LogFlowFunc(("pShaper=%#p pcszBwGroup=%#p{%s} cbTransferPerSecMax=%llu\n",
- pShaper, pcszBwGroup, pcszBwGroup, cbTransferPerSecMax));
+ LogFlow(("pdmNsBwGroupCreate: pShaper=%#p pszBwGroup=%#p{%s} cbPerSecMax=%llu\n", pShaper, pszBwGroup, pszBwGroup, cbPerSecMax));
AssertPtrReturn(pShaper, VERR_INVALID_POINTER);
- AssertPtrReturn(pcszBwGroup, VERR_INVALID_POINTER);
- AssertReturn(*pcszBwGroup != '\0', VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszBwGroup, VERR_INVALID_POINTER);
+ AssertReturn(*pszBwGroup != '\0', VERR_INVALID_PARAMETER);
int rc;
- PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
+ PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pszBwGroup);
if (!pBwGroup)
{
rc = MMHyperAlloc(pShaper->pVM, sizeof(PDMNSBWGROUP), 64,
MM_TAG_PDM_NET_SHAPER, (void **)&pBwGroup);
if (RT_SUCCESS(rc))
{
- rc = PDMR3CritSectInit(pShaper->pVM, &pBwGroup->cs, RT_SRC_POS, "BWGRP");
+ rc = PDMR3CritSectInit(pShaper->pVM, &pBwGroup->Lock, RT_SRC_POS, "BWGRP");
if (RT_SUCCESS(rc))
{
- pBwGroup->pszName = RTStrDup(pcszBwGroup);
- if (pBwGroup->pszName)
+ pBwGroup->pszNameR3 = MMR3HeapStrDup(pShaper->pVM, MM_TAG_PDM_NET_SHAPER, pszBwGroup);
+ if (pBwGroup->pszNameR3)
{
- pBwGroup->pShaper = pShaper;
+ pBwGroup->pShaperR3 = pShaper;
pBwGroup->cRefs = 0;
- pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);
+ pdmNsBwGroupSetLimit(pBwGroup, cbPerSecMax);
- pBwGroup->cbTokensLast = pBwGroup->cbBucketSize;
+ pBwGroup->cbTokensLast = pBwGroup->cbBucket;
pBwGroup->tsUpdatedLast = RTTimeSystemNanoTS();
- LogFlowFunc(("pcszBwGroup={%s} cbBucketSize=%u\n",
- pcszBwGroup, pBwGroup->cbBucketSize));
+ LogFlowFunc(("pszBwGroup={%s} cbBucket=%u\n",
+ pszBwGroup, pBwGroup->cbBucket));
pdmNsBwGroupLink(pBwGroup);
return VINF_SUCCESS;
}
- PDMR3CritSectDelete(&pBwGroup->cs);
+ PDMR3CritSectDelete(&pBwGroup->Lock);
}
MMHyperFree(pShaper->pVM, pBwGroup);
}
@@ -188,8 +196,8 @@ static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pcszBwGroup, ui
static void pdmNsBwGroupTerminate(PPDMNSBWGROUP pBwGroup)
{
Assert(pBwGroup->cRefs == 0);
- if (PDMCritSectIsInitialized(&pBwGroup->cs))
- PDMR3CritSectDelete(&pBwGroup->cs);
+ if (PDMCritSectIsInitialized(&pBwGroup->Lock))
+ PDMR3CritSectDelete(&pBwGroup->Lock);
}
@@ -214,41 +222,41 @@ static void pdmNsBwGroupXmitPending(PPDMNSBWGROUP pBwGroup)
* held.
*/
AssertPtr(pBwGroup);
- AssertPtr(pBwGroup->pShaper);
- Assert(RTCritSectIsOwner(&pBwGroup->pShaper->cs));
- //int rc = RTCritSectEnter(&pBwGroup->cs); AssertRC(rc);
+ AssertPtr(pBwGroup->pShaperR3);
+ Assert(RTCritSectIsOwner(&pBwGroup->pShaperR3->Lock));
+ //LOCK_NETSHAPER(pShaper);
/* Check if the group is disabled. */
- if (pBwGroup->cbTransferPerSecMax == 0)
+ if (pBwGroup->cbPerSecMax == 0)
return;
- PPDMNSFILTER pFilter = pBwGroup->pFiltersHead;
+ PPDMNSFILTER pFilter = pBwGroup->pFiltersHeadR3;
while (pFilter)
{
bool fChoked = ASMAtomicXchgBool(&pFilter->fChoked, false);
Log3((LOG_FN_FMT ": pFilter=%#p fChoked=%RTbool\n", __PRETTY_FUNCTION__, pFilter, fChoked));
- if (fChoked && pFilter->pIDrvNet)
+ if (fChoked && pFilter->pIDrvNetR3)
{
LogFlowFunc(("Calling pfnXmitPending for pFilter=%#p\n", pFilter));
- pFilter->pIDrvNet->pfnXmitPending(pFilter->pIDrvNet);
+ pFilter->pIDrvNetR3->pfnXmitPending(pFilter->pIDrvNetR3);
}
- pFilter = pFilter->pNext;
+ pFilter = pFilter->pNextR3;
}
- //rc = RTCritSectLeave(&pBwGroup->cs); AssertRC(rc);
+ //UNLOCK_NETSHAPER(pShaper);
}
static void pdmNsFilterLink(PPDMNSFILTER pFilter)
{
PPDMNSBWGROUP pBwGroup = pFilter->pBwGroupR3;
- int rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);
+ int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
- pFilter->pNext = pBwGroup->pFiltersHead;
- pBwGroup->pFiltersHead = pFilter;
+ pFilter->pNextR3 = pBwGroup->pFiltersHeadR3;
+ pBwGroup->pFiltersHeadR3 = pFilter;
- rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
+ rc = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc);
}
@@ -261,117 +269,135 @@ static void pdmNsFilterUnlink(PPDMNSFILTER pFilter)
* of group's filters.
*/
AssertPtr(pBwGroup);
- AssertPtr(pBwGroup->pShaper);
- Assert(RTCritSectIsOwner(&pBwGroup->pShaper->cs));
- int rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);
+ AssertPtr(pBwGroup->pShaperR3);
+ Assert(RTCritSectIsOwner(&pBwGroup->pShaperR3->Lock));
+ int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
- if (pFilter == pBwGroup->pFiltersHead)
- pBwGroup->pFiltersHead = pFilter->pNext;
+ if (pFilter == pBwGroup->pFiltersHeadR3)
+ pBwGroup->pFiltersHeadR3 = pFilter->pNextR3;
else
{
- PPDMNSFILTER pPrev = pBwGroup->pFiltersHead;
+ PPDMNSFILTER pPrev = pBwGroup->pFiltersHeadR3;
while ( pPrev
- && pPrev->pNext != pFilter)
- pPrev = pPrev->pNext;
+ && pPrev->pNextR3 != pFilter)
+ pPrev = pPrev->pNextR3;
AssertPtr(pPrev);
- pPrev->pNext = pFilter->pNext;
+ pPrev->pNextR3 = pFilter->pNextR3;
}
- rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
+ rc = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc);
}
-VMMR3DECL(int) PDMR3NsAttach(PVM pVM, PPDMDRVINS pDrvIns, const char *pcszBwGroup,
- PPDMNSFILTER pFilter)
+/**
+ * Attach network filter driver from bandwidth group.
+ *
+ * @returns VBox status code.
+ * @param pVM Handle of VM.
+ * @param pDrvIns The driver instance.
+ * @param pszBwGroup Name of the bandwidth group to attach to.
+ * @param pFilter Pointer to the filter we attach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsAttach(PUVM pUVM, PPDMDRVINS pDrvIns, const char *pszBwGroup, PPDMNSFILTER pFilter)
{
- VM_ASSERT_EMT(pVM);
+ VM_ASSERT_EMT(pUVM->pVM);
AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);
-
- PUVM pUVM = pVM->pUVM;
PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
+ LOCK_NETSHAPER_RETURN(pShaper);
- PPDMNSBWGROUP pBwGroupOld = NULL;
- PPDMNSBWGROUP pBwGroupNew = NULL;
+ int rc = VINF_SUCCESS;
+ PPDMNSBWGROUP pBwGroupNew = NULL;
+ if (pszBwGroup)
+ {
+ pBwGroupNew = pdmNsBwGroupFindById(pShaper, pszBwGroup);
+ if (pBwGroupNew)
+ pdmNsBwGroupRef(pBwGroupNew);
+ else
+ rc = VERR_NOT_FOUND;
+ }
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
if (RT_SUCCESS(rc))
{
- if (pcszBwGroup)
- {
- pBwGroupNew = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
- if (pBwGroupNew)
- pdmNsBwGroupRef(pBwGroupNew);
- else
- rc = VERR_NOT_FOUND;
- }
-
- if (RT_SUCCESS(rc))
- {
- pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
- ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pVM, pBwGroupNew));
- if (pBwGroupOld)
- pdmNsBwGroupUnref(pBwGroupOld);
- pdmNsFilterLink(pFilter);
- }
- int rc2 = RTCritSectLeave(&pShaper->cs); AssertRC(rc2);
+ PPDMNSBWGROUP pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
+ ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pUVM->pVM, pBwGroupNew));
+ if (pBwGroupOld)
+ pdmNsBwGroupUnref(pBwGroupOld);
+ pdmNsFilterLink(pFilter);
}
+ UNLOCK_NETSHAPER(pShaper);
return rc;
}
-VMMR3DECL(int) PDMR3NsDetach(PVM pVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
+/**
+ * Detach network filter driver from bandwidth group.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pDrvIns The driver instance.
+ * @param pFilter Pointer to the filter we detach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsDetach(PUVM pUVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
{
- VM_ASSERT_EMT(pVM);
+ VM_ASSERT_EMT(pUVM->pVM);
AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
+ /* Now, return quietly if the filter isn't attached since driver/device
+ destructors are called on constructor failure. */
+ if (!pFilter->pBwGroupR3)
+ return VINF_SUCCESS;
AssertPtrReturn(pFilter->pBwGroupR3, VERR_INVALID_POINTER);
- PUVM pUVM = pVM->pUVM;
PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
+ LOCK_NETSHAPER_RETURN(pShaper);
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
- if (RT_SUCCESS(rc))
- {
- pdmNsFilterUnlink(pFilter);
- PPDMNSBWGROUP pBwGroup = NULL;
- pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
- if (pBwGroup)
- pdmNsBwGroupUnref(pBwGroup);
- int rc2 = RTCritSectLeave(&pShaper->cs); AssertRC(rc2);
- }
- return rc;
-}
-
+ pdmNsFilterUnlink(pFilter);
+ PPDMNSBWGROUP pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
+ if (pBwGroup)
+ pdmNsBwGroupUnref(pBwGroup);
-VMMR3DECL(bool) PDMR3NsAllocateBandwidth(PPDMNSFILTER pFilter, size_t cbTransfer)
-{
- return pdmNsAllocateBandwidth(pFilter, cbTransfer);
+ UNLOCK_NETSHAPER(pShaper);
+ return VINF_SUCCESS;
}
-VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PVM pVM, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
+/**
+ * Adjusts the maximum rate for the bandwidth group.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszBwGroup Name of the bandwidth group to attach to.
+ * @param cbPerSecMax Maximum number of bytes per second to be transmitted.
+ */
+VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PUVM pUVM, const char *pszBwGroup, uint64_t cbPerSecMax)
{
- PUVM pUVM = pVM->pUVM;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
+ LOCK_NETSHAPER_RETURN(pShaper);
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
- if (RT_SUCCESS(rc))
+ int rc;
+ PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pszBwGroup);
+ if (pBwGroup)
{
- PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
- if (pBwGroup)
+ rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
+ if (RT_SUCCESS(rc))
{
- rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);
- pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);
+ pdmNsBwGroupSetLimit(pBwGroup, cbPerSecMax);
+
/* Drop extra tokens */
- if (pBwGroup->cbTokensLast > pBwGroup->cbBucketSize)
- pBwGroup->cbTokensLast = pBwGroup->cbBucketSize;
- rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
+ if (pBwGroup->cbTokensLast > pBwGroup->cbBucket)
+ pBwGroup->cbTokensLast = pBwGroup->cbBucket;
+
+ int rc2 = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc2);
}
- rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
}
+ else
+ rc = VERR_NOT_FOUND;
+
+ UNLOCK_NETSHAPER(pShaper);
return rc;
}
@@ -390,15 +416,16 @@ static DECLCALLBACK(int) pdmR3NsTxThread(PVM pVM, PPDMTHREAD pThread)
while (pThread->enmState == PDMTHREADSTATE_RUNNING)
{
RTThreadSleep(PDM_NETSHAPER_MAX_LATENCY);
+
/* Go over all bandwidth groups/filters calling pfnXmitPending */
- int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
+ LOCK_NETSHAPER(pShaper);
PPDMNSBWGROUP pBwGroup = pShaper->pBwGroupsHead;
while (pBwGroup)
{
pdmNsBwGroupXmitPending(pBwGroup);
- pBwGroup = pBwGroup->pNext;
+ pBwGroup = pBwGroup->pNextR3;
}
- rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
+ UNLOCK_NETSHAPER(pShaper);
}
return VINF_SUCCESS;
}
@@ -436,12 +463,13 @@ int pdmR3NetShaperTerm(PVM pVM)
while (pBwGroup)
{
PPDMNSBWGROUP pFree = pBwGroup;
- pBwGroup = pBwGroup->pNext;
+ pBwGroup = pBwGroup->pNextR3;
pdmNsBwGroupTerminate(pFree);
+ MMR3HeapFree(pFree->pszNameR3);
MMHyperFree(pVM, pFree);
}
- RTCritSectDelete(&pShaper->cs);
+ RTCritSectDelete(&pShaper->Lock);
return VINF_SUCCESS;
}
@@ -454,27 +482,23 @@ int pdmR3NetShaperTerm(PVM pVM)
*/
int pdmR3NetShaperInit(PVM pVM)
{
- LogFlowFunc((": pVM=%p\n", pVM));
-
+ LogFlow(("pdmR3NetShaperInit: pVM=%p\n", pVM));
VM_ASSERT_EMT(pVM);
+ PUVM pUVM = pVM->pUVM;
+ AssertMsgReturn(!pUVM->pdm.s.pNetShaper, ("Network shaper was already initialized\n"), VERR_WRONG_ORDER);
- PPDMNETSHAPER pNetShaper = NULL;
-
- int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_NET_SHAPER,
- sizeof(PDMNETSHAPER),
- (void **)&pNetShaper);
+ PPDMNETSHAPER pShaper;
+ int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_NET_SHAPER, sizeof(PDMNETSHAPER), (void **)&pShaper);
if (RT_SUCCESS(rc))
{
- PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
- PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(pCfgRoot, "PDM"), "NetworkShaper");
+ PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "NetworkShaper");
- pNetShaper->pVM = pVM;
- rc = RTCritSectInit(&pNetShaper->cs);
+ pShaper->pVM = pVM;
+ rc = RTCritSectInit(&pShaper->Lock);
if (RT_SUCCESS(rc))
{
/* Create all bandwidth groups. */
PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNetShaper, "BwGroups");
-
if (pCfgBwGrp)
{
for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
@@ -495,7 +519,7 @@ int pdmR3NetShaperInit(PVM pVM)
if (RT_SUCCESS(rc))
rc = CFGMR3QueryU64(pCur, "Max", &cbMax);
if (RT_SUCCESS(rc))
- rc = pdmNsBwGroupCreate(pNetShaper, pszBwGrpId, cbMax);
+ rc = pdmNsBwGroupCreate(pShaper, pszBwGrpId, cbMax);
RTMemFree(pszBwGrpId);
@@ -506,29 +530,22 @@ int pdmR3NetShaperInit(PVM pVM)
if (RT_SUCCESS(rc))
{
- PUVM pUVM = pVM->pUVM;
- AssertMsg(!pUVM->pdm.s.pNetShaper, ("Network shaper was already initialized\n"));
-
- char szDesc[64];
- static unsigned s_iThread;
-
- RTStrPrintf(szDesc, sizeof(szDesc), "PDMNsTx-%d", ++s_iThread);
- rc = PDMR3ThreadCreate(pVM, &pNetShaper->hTxThread, pNetShaper,
- pdmR3NsTxThread, pdmR3NsTxWakeUp, 0,
- RTTHREADTYPE_IO, szDesc);
+ rc = PDMR3ThreadCreate(pVM, &pShaper->pTxThread, pShaper, pdmR3NsTxThread, pdmR3NsTxWakeUp,
+ 0 /*cbStack*/, RTTHREADTYPE_IO, "PDMNsTx");
if (RT_SUCCESS(rc))
{
- pUVM->pdm.s.pNetShaper = pNetShaper;
+ pUVM->pdm.s.pNetShaper = pShaper;
return VINF_SUCCESS;
}
}
- RTCritSectDelete(&pNetShaper->cs);
+ RTCritSectDelete(&pShaper->Lock);
}
- MMR3HeapFree(pNetShaper);
+
+ MMR3HeapFree(pShaper);
}
- LogFlowFunc((": pVM=%p rc=%Rrc\n", pVM, rc));
+ LogFlow(("pdmR3NetShaperInit: pVM=%p rc=%Rrc\n", pVM, rc));
return rc;
}
diff --git a/src/VBox/VMM/VMMR3/PDMQueue.cpp b/src/VBox/VMM/VMMR3/PDMQueue.cpp
index 9b8fc12b..074d72ef 100644
--- a/src/VBox/VMM/VMMR3/PDMQueue.cpp
+++ b/src/VBox/VMM/VMMR3/PDMQueue.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -39,7 +39,7 @@
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
-DECLINLINE(void) pdmR3QueueFree(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem);
+DECLINLINE(void) pdmR3QueueFreeItem(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem);
static bool pdmR3QueueFlush(PPDMQUEUE pQueue);
static DECLCALLBACK(void) pdmR3QueueTimer(PVM pVM, PTMTIMER pTimer, void *pvUser);
@@ -264,11 +264,7 @@ VMMR3_INT_DECL(int) PDMR3QueueCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, size_t c
* Validate input.
*/
VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
- if (!pfnCallback)
- {
- AssertMsgFailed(("No consumer callback!\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
/*
* Create the queue.
@@ -314,11 +310,7 @@ VMMR3_INT_DECL(int) PDMR3QueueCreateInternal(PVM pVM, size_t cbItem, uint32_t cI
* Validate input.
*/
VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
- if (!pfnCallback)
- {
- AssertMsgFailed(("No consumer callback!\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
/*
* Create the queue.
@@ -362,11 +354,7 @@ VMMR3_INT_DECL(int) PDMR3QueueCreateExternal(PVM pVM, size_t cbItem, uint32_t cI
* Validate input.
*/
VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
- if (!pfnCallback)
- {
- AssertMsgFailed(("No consumer callback!\n"));
- return VERR_INVALID_PARAMETER;
- }
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
/*
* Create the queue.
@@ -457,16 +445,7 @@ VMMR3_INT_DECL(int) PDMR3QueueDestroy(PPDMQUEUE pQueue)
/*
* Deregister statistics.
*/
- STAMR3Deregister(pVM, &pQueue->cbItem);
- STAMR3Deregister(pVM, &pQueue->cbItem);
- STAMR3Deregister(pVM, &pQueue->StatAllocFailures);
- STAMR3Deregister(pVM, &pQueue->StatInsert);
- STAMR3Deregister(pVM, &pQueue->StatFlush);
- STAMR3Deregister(pVM, &pQueue->StatFlushLeftovers);
-#ifdef VBOX_WITH_STATISTICS
- STAMR3Deregister(pVM, &pQueue->StatFlushPrf);
- STAMR3Deregister(pVM, (void *)&pQueue->cStatPending);
-#endif
+ STAMR3DeregisterF(pVM->pUVM, "/PDM/Queue/%s/cbItem", pQueue->pszName);
/*
* Destroy the timer and free it.
@@ -683,7 +662,7 @@ VMMR3_INT_DECL(void) PDMR3QueueFlushAll(PVM pVM)
/* We're done if there were no inserts while we were busy. */
if ( !ASMBitTest(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT)
- && !VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
+ && !VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
break;
VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
}
@@ -764,7 +743,7 @@ static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
break;
pCur = pItems;
pItems = pItems->pNextR3;
- pdmR3QueueFree(pQueue, pCur);
+ pdmR3QueueFreeItem(pQueue, pCur);
}
break;
@@ -775,7 +754,7 @@ static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
break;
pCur = pItems;
pItems = pItems->pNextR3;
- pdmR3QueueFree(pQueue, pCur);
+ pdmR3QueueFreeItem(pQueue, pCur);
}
break;
@@ -786,7 +765,7 @@ static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
break;
pCur = pItems;
pItems = pItems->pNextR3;
- pdmR3QueueFree(pQueue, pCur);
+ pdmR3QueueFreeItem(pQueue, pCur);
}
break;
@@ -797,7 +776,7 @@ static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
break;
pCur = pItems;
pItems = pItems->pNextR3;
- pdmR3QueueFree(pQueue, pCur);
+ pdmR3QueueFreeItem(pQueue, pCur);
}
break;
@@ -858,7 +837,7 @@ static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
* @param pQueue The queue.
* @param pItem The item.
*/
-DECLINLINE(void) pdmR3QueueFree(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem)
+DECLINLINE(void) pdmR3QueueFreeItem(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem)
{
VM_ASSERT_EMT(pQueue->pVMR3);
diff --git a/src/VBox/VMM/VMMR3/PDMThread.cpp b/src/VBox/VMM/VMMR3/PDMThread.cpp
index f3cb3bdb..391fece0 100644
--- a/src/VBox/VMM/VMMR3/PDMThread.cpp
+++ b/src/VBox/VMM/VMMR3/PDMThread.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2007 Oracle Corporation
+ * Copyright (C) 2007-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -957,7 +957,8 @@ VMMR3DECL(int) PDMR3ThreadSuspend(PPDMTHREAD pThread)
/*
* Something failed, initialize termination.
*/
- AssertMsgFailed(("PDMR3ThreadSuspend -> rc=%Rrc enmState=%d\n", rc, pThread->enmState));
+ AssertMsgFailed(("PDMR3ThreadSuspend -> rc=%Rrc enmState=%d suspending '%s'\n",
+ rc, pThread->enmState, RTThreadGetName(pThread->Thread)));
pdmR3ThreadBailOut(pThread);
return rc;
}
diff --git a/src/VBox/VMM/VMMR3/PDMUsb.cpp b/src/VBox/VMM/VMMR3/PDMUsb.cpp
index 179ba44d..0e39f052 100644
--- a/src/VBox/VMM/VMMR3/PDMUsb.cpp
+++ b/src/VBox/VMM/VMMR3/PDMUsb.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -28,6 +28,7 @@
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/version.h>
#include <VBox/err.h>
@@ -225,7 +226,8 @@ static DECLCALLBACK(int) pdmR3UsbReg_Register(PCPDMUSBREGCB pCallbacks, PCPDMUSB
&& pdmR3IsValidName(pReg->szName),
("Invalid name '%.s'\n", sizeof(pReg->szName), pReg->szName),
VERR_PDM_INVALID_USB_REGISTRATION);
- AssertMsgReturn(pReg->fFlags == 0, ("fFlags=%#x\n", pReg->fFlags), VERR_PDM_INVALID_USB_REGISTRATION);
+ AssertMsgReturn((pReg->fFlags & ~(PDM_USBREG_HIGHSPEED_CAPABLE)) == 0,
+ ("fFlags=%#x\n", pReg->fFlags), VERR_PDM_INVALID_USB_REGISTRATION);
AssertMsgReturn(pReg->cMaxInstances > 0,
("Max instances %u! (USB Device %s)\n", pReg->cMaxInstances, pReg->szName),
VERR_PDM_INVALID_USB_REGISTRATION);
@@ -459,21 +461,22 @@ static int pdmR3UsbFindHub(PVM pVM, uint32_t iUsbVersion, PPDMUSBHUB *ppHub)
* @param pUsbDev The USB device emulation.
* @param iInstance -1 if not called by pdmR3UsbInstantiateDevices().
* @param pUuid The UUID for this device.
- * @param pInstanceNode The instance CFGM node. NULL if not called by pdmR3UsbInstantiateDevices().
- * @param ppConfig Pointer to the device configuration pointer. This is set to NULL if inserted
+ * @param ppInstanceNode Pointer to the device instance pointer. This is set to NULL if inserted
* into the tree or cleaned up.
*
- * In the pdmR3UsbInstantiateDevices() case (pInstanceNode != NULL) this is
- * the actual config node and will not be cleaned up.
+ * In the pdmR3UsbInstantiateDevices() case (iInstance != -1) this is
+ * the actual instance node and will not be cleaned up.
*
* @parma iUsbVersion The USB version preferred by the device.
*/
static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int iInstance, PCRTUUID pUuid,
- PCFGMNODE pInstanceNode, PCFGMNODE *ppConfig, uint32_t iUsbVersion)
+ PCFGMNODE *ppInstanceNode, uint32_t iUsbVersion)
{
- const bool fAtRuntime = pInstanceNode == NULL;
+ const bool fAtRuntime = iInstance == -1;
int rc;
- NOREF(iUsbVersion);
+
+ AssertPtrReturn(ppInstanceNode, VERR_INVALID_POINTER);
+ AssertPtrReturn(*ppInstanceNode, VERR_INVALID_POINTER);
/*
* If not called by pdmR3UsbInstantiateDevices(), we'll have to fix
@@ -488,8 +491,12 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
}
/* The instance node and number. */
- if (!pInstanceNode)
+ PCFGMNODE pInstanceToDelete = NULL;
+ PCFGMNODE pInstanceNode = NULL;
+ if (fAtRuntime)
{
+ /** @todo r=bird: This code is bogus as it ASSUMES that all USB devices are
+ * capable of infinite number of instances. */
for (unsigned c = 0; c < _2M; c++)
{
iInstance = pUsbDev->iNextInstance++;
@@ -498,31 +505,27 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
break;
}
AssertRCReturn(rc, rc);
+
+ rc = CFGMR3ReplaceSubTree(pInstanceNode, *ppInstanceNode);
+ AssertRCReturn(rc, rc);
+ *ppInstanceNode = NULL;
+ pInstanceToDelete = pInstanceNode;
}
else
{
Assert(iInstance >= 0);
if (iInstance >= (int)pUsbDev->iNextInstance)
pUsbDev->iNextInstance = iInstance + 1;
+ pInstanceNode = *ppInstanceNode;
}
- /* The instance config node. */
- PCFGMNODE pConfigToDelete = NULL;
- PCFGMNODE pConfig = NULL;
- if (!ppConfig || !*ppConfig)
+ /* Make sure the instance config node exists. */
+ PCFGMNODE pConfig = CFGMR3GetChild(pInstanceNode, "Config");
+ if (!pConfig)
{
rc = CFGMR3InsertNode(pInstanceNode, "Config", &pConfig);
AssertRCReturn(rc, rc);
}
- else if (fAtRuntime)
- {
- rc = CFGMR3InsertSubTree(pInstanceNode, "Config", *ppConfig, &pConfig);
- AssertRCReturn(rc, rc);
- *ppConfig = NULL;
- pConfigToDelete = pConfig;
- }
- else
- pConfig = *ppConfig;
Assert(CFGMR3GetChild(pInstanceNode, "Config") == pConfig);
/* The global device config node. */
@@ -532,7 +535,7 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
rc = CFGMR3InsertNode(pDevNode, "GlobalConfig", &pGlobalConfig);
if (RT_FAILURE(rc))
{
- CFGMR3RemoveNode(pConfigToDelete);
+ CFGMR3RemoveNode(pInstanceToDelete);
AssertRCReturn(rc, rc);
}
}
@@ -548,7 +551,7 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
{
AssertMsgFailed(("Failed to allocate %d bytes of instance data for USB device '%s'. rc=%Rrc\n",
cb, pUsbDev->pReg->szName, rc));
- CFGMR3RemoveNode(pConfigToDelete);
+ CFGMR3RemoveNode(pInstanceToDelete);
return rc;
}
@@ -562,12 +565,15 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
pUsbIns->Internal.s.pVM = pVM;
//pUsbIns->Internal.s.pLuns = NULL;
pUsbIns->Internal.s.pCfg = pInstanceNode;
- pUsbIns->Internal.s.pCfgDelete = pConfigToDelete;
+ pUsbIns->Internal.s.pCfgDelete = pInstanceToDelete;
pUsbIns->Internal.s.pCfgGlobal = pGlobalConfig;
pUsbIns->Internal.s.Uuid = *pUuid;
//pUsbIns->Internal.s.pHub = NULL;
pUsbIns->Internal.s.iPort = UINT32_MAX; /* to be determined. */
- pUsbIns->Internal.s.fVMSuspended = true;
+ /* Set the flag accordingly.
+ * Oherwise VMPowerOff, VMSuspend will not be called for devices attached at runtime.
+ */
+ pUsbIns->Internal.s.fVMSuspended = !fAtRuntime;
//pUsbIns->Internal.s.pfnAsyncNotify = NULL;
pUsbIns->pHlpR3 = &g_pdmR3UsbHlp;
pUsbIns->pReg = pUsbDev->pReg;
@@ -578,6 +584,7 @@ static int pdmR3UsbCreateDevice(PVM pVM, PPDMUSBHUB pHub, PPDMUSB pUsbDev, int i
pUsbIns->pszName = RTStrDup(pUsbDev->pReg->szName);
//pUsbIns->fTracing = 0;
pUsbIns->idTracing = ++pVM->pdm.s.idTracingOther;
+ pUsbIns->iUsbHubVersion = iUsbVersion;
/*
* Link it into all the lists.
@@ -799,11 +806,15 @@ int pdmR3UsbInstantiateDevices(PVM pVM)
}
CFGMR3SetRestrictedRoot(pConfigNode);
- /** @todo
- * Figure out the USB version from the USB device registration and the configuration.
+ /*
+ * Every device must support USB 1.x hubs; optionally, high-speed USB 2.0 hubs
+ * might be also supported. This determines where to attach the device.
*/
uint32_t iUsbVersion = VUSB_STDVER_11;
+ if (paUsbDevs[i].pUsbDev->pReg->fFlags & PDM_USBREG_HIGHSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_20;
+
/*
* Find a suitable hub with free ports.
*/
@@ -816,12 +827,18 @@ int pdmR3UsbInstantiateDevices(PVM pVM)
}
/*
+ * This is how we inform the device what speed it's communicating at, and hence
+ * which descriptors it should present to the guest.
+ */
+ iUsbVersion &= pHub->fVersions;
+
+ /*
* Create and attach the device.
*/
RTUUID Uuid;
rc = RTUuidCreate(&Uuid);
AssertRCReturn(rc, rc);
- rc = pdmR3UsbCreateDevice(pVM, pHub, paUsbDevs[i].pUsbDev, paUsbDevs[i].iInstance, &Uuid, paUsbDevs[i].pNode, &pConfigNode, iUsbVersion);
+ rc = pdmR3UsbCreateDevice(pVM, pHub, paUsbDevs[i].pUsbDev, paUsbDevs[i].iInstance, &Uuid, &paUsbDevs[i].pNode, iUsbVersion);
if (RT_FAILURE(rc))
return rc;
} /* for device instances */
@@ -831,13 +848,84 @@ int pdmR3UsbInstantiateDevices(PVM pVM)
/**
+ * Creates an emulated USB device instance at runtime.
+ *
+ * This will find an appropriate HUB for the USB device
+ * and try instantiate the emulated device.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pszDeviceName The name of the PDM device to instantiate.
+ * @param pInstanceNode The instance CFGM node.
+ * @param pUuid The UUID to be associated with the device.
+ *
+ * @thread EMT
+ */
+VMMR3DECL(int) PDMR3UsbCreateEmulatedDevice(PUVM pUVM, const char *pszDeviceName, PCFGMNODE pInstanceNode, PCRTUUID pUuid)
+{
+ /*
+ * Validate input.
+ */
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+ AssertPtrReturn(pszDeviceName, VERR_INVALID_POINTER);
+ AssertPtrReturn(pInstanceNode, VERR_INVALID_POINTER);
+
+ /*
+ * Find the device.
+ */
+ PPDMUSB pUsbDev = pdmR3UsbLookup(pVM, pszDeviceName);
+ if (!pUsbDev)
+ {
+ LogRel(("PDMR3UsbCreateEmulatedDevice: The '%s' device wasn't found\n", pszDeviceName));
+ return VERR_PDM_NO_USBPROXY;
+ }
+
+ /*
+ * Every device must support USB 1.x hubs; optionally, high-speed USB 2.0 hubs
+ * might be also supported. This determines where to attach the device.
+ */
+ uint32_t iUsbVersion = VUSB_STDVER_11;
+ if (pUsbDev->pReg->fFlags & PDM_USBREG_HIGHSPEED_CAPABLE)
+ iUsbVersion |= VUSB_STDVER_20;
+
+ /*
+ * Find a suitable hub with free ports.
+ */
+ PPDMUSBHUB pHub;
+ int rc = pdmR3UsbFindHub(pVM, iUsbVersion, &pHub);
+ if (RT_FAILURE(rc))
+ {
+ Log(("pdmR3UsbFindHub: failed %Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * This is how we inform the device what speed it's communicating at, and hence
+ * which descriptors it should present to the guest.
+ */
+ iUsbVersion &= pHub->fVersions;
+
+ /*
+ * Create and attach the device.
+ */
+ rc = pdmR3UsbCreateDevice(pVM, pHub, pUsbDev, -1, pUuid, &pInstanceNode, iUsbVersion);
+ AssertRCReturn(rc, rc);
+
+ return rc;
+}
+
+
+/**
* Creates a USB proxy device instance.
*
* This will find an appropriate HUB for the USB device, create the necessary CFGM stuff
* and try instantiate the proxy device.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pUuid The UUID to be associated with the device.
* @param fRemote Whether it's a remove or local device.
* @param pszAddress The address string.
@@ -845,13 +933,16 @@ int pdmR3UsbInstantiateDevices(PVM pVM)
* @param iUsbVersion The preferred USB version.
* @param fMaskedIfs The interfaces to hide from the guest.
*/
-VMMR3DECL(int) PDMR3USBCreateProxyDevice(PVM pVM, PCRTUUID pUuid, bool fRemote, const char *pszAddress, void *pvBackend,
+VMMR3DECL(int) PDMR3UsbCreateProxyDevice(PUVM pUVM, PCRTUUID pUuid, bool fRemote, const char *pszAddress, void *pvBackend,
uint32_t iUsbVersion, uint32_t fMaskedIfs)
{
/*
* Validate input.
*/
- VM_ASSERT_EMT(pVM);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
AssertPtrReturn(pUuid, VERR_INVALID_POINTER);
AssertPtrReturn(pszAddress, VERR_INVALID_POINTER);
AssertReturn( iUsbVersion == VUSB_STDVER_20
@@ -863,7 +954,7 @@ VMMR3DECL(int) PDMR3USBCreateProxyDevice(PVM pVM, PCRTUUID pUuid, bool fRemote,
PPDMUSB pUsbDev = pdmR3UsbLookup(pVM, "USBProxy");
if (!pUsbDev)
{
- LogRel(("PDMR3USBCreateProxyDevice: The USBProxy device class wasn't found\n"));
+ LogRel(("PDMR3UsbCreateProxyDevice: The USBProxy device class wasn't found\n"));
return VERR_PDM_NO_USBPROXY;
}
@@ -879,12 +970,14 @@ VMMR3DECL(int) PDMR3USBCreateProxyDevice(PVM pVM, PCRTUUID pUuid, bool fRemote,
}
/*
- * Create the CFGM configuration node.
+ * Create the CFGM instance node.
*/
- PCFGMNODE pConfig = CFGMR3CreateTree(pVM);
- AssertReturn(pConfig, VERR_NO_MEMORY);
+ PCFGMNODE pInstance = CFGMR3CreateTree(pUVM);
+ AssertReturn(pInstance, VERR_NO_MEMORY);
do /* break loop */
{
+ PCFGMNODE pConfig;
+ rc = CFGMR3InsertNode(pInstance, "Config", &pConfig); AssertRCBreak(rc);
rc = CFGMR3InsertString(pConfig, "Address", pszAddress); AssertRCBreak(rc);
char szUuid[RTUUID_STR_LENGTH];
rc = RTUuidToStr(pUuid, &szUuid[0], sizeof(szUuid)); AssertRCBreak(rc);
@@ -897,17 +990,17 @@ VMMR3DECL(int) PDMR3USBCreateProxyDevice(PVM pVM, PCRTUUID pUuid, bool fRemote,
} while (0); /* break loop */
if (RT_FAILURE(rc))
{
- CFGMR3RemoveNode(pConfig);
- LogRel(("PDMR3USBCreateProxyDevice: failed to setup CFGM config, rc=%Rrc\n", rc));
+ CFGMR3RemoveNode(pInstance);
+ LogRel(("PDMR3UsbCreateProxyDevice: failed to setup CFGM config, rc=%Rrc\n", rc));
return rc;
}
/*
- * Finally, try create it.
+ * Finally, try to create it.
*/
- rc = pdmR3UsbCreateDevice(pVM, pHub, pUsbDev, -1, pUuid, NULL, &pConfig, iUsbVersion);
- if (RT_FAILURE(rc) && pConfig)
- CFGMR3RemoveNode(pConfig);
+ rc = pdmR3UsbCreateDevice(pVM, pHub, pUsbDev, -1, pUuid, &pInstance, iUsbVersion);
+ if (RT_FAILURE(rc) && pInstance)
+ CFGMR3RemoveNode(pInstance);
return rc;
}
@@ -953,6 +1046,9 @@ static void pdmR3UsbDestroyDevice(PVM pVM, PPDMUSBINS pUsbIns)
TMR3TimerDestroyUsb(pVM, pUsbIns);
//SSMR3DeregisterUsb(pVM, pUsbIns, NULL, 0);
pdmR3ThreadDestroyUsb(pVM, pUsbIns);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ pdmR3AsyncCompletionTemplateDestroyUsb(pVM, pUsbIns);
+#endif
/*
* Unlink it.
@@ -1009,18 +1105,20 @@ static void pdmR3UsbDestroyDevice(PVM pVM, PPDMUSBINS pUsbIns)
* Detaches and destroys a USB device.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pUuid The UUID associated with the device to detach.
* @thread EMT
*/
-VMMR3DECL(int) PDMR3USBDetachDevice(PVM pVM, PCRTUUID pUuid)
+VMMR3DECL(int) PDMR3UsbDetachDevice(PUVM pUVM, PCRTUUID pUuid)
{
/*
* Validate input.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VM_ASSERT_EMT(pVM);
AssertPtrReturn(pUuid, VERR_INVALID_POINTER);
- AssertPtrReturn(pVM, VERR_INVALID_POINTER);
/*
* Search the global list for it.
@@ -1064,10 +1162,13 @@ VMMR3DECL(int) PDMR3USBDetachDevice(PVM pVM, PCRTUUID pUuid)
* Checks if there are any USB hubs attached.
*
* @returns true / false accordingly.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(bool) PDMR3USBHasHub(PVM pVM)
+VMMR3DECL(bool) PDMR3UsbHasHub(PUVM pUVM)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, false);
return pVM->pdm.s.pUsbHubs != NULL;
}
@@ -1440,6 +1541,32 @@ static DECLCALLBACK(void) pdmR3UsbHlp_AsyncNotificationCompleted(PPDMUSBINS pUsb
}
+/** @interface_method_impl{PDMUSBHLP,pfnVMGetSuspendReason} */
+static DECLCALLBACK(VMSUSPENDREASON) pdmR3UsbHlp_VMGetSuspendReason(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ VMSUSPENDREASON enmReason = VMR3GetSuspendReason(pVM->pUVM);
+ LogFlow(("pdmR3UsbHlp_VMGetSuspendReason: caller='%s'/%d: returns %d\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
+/** @interface_method_impl{PDMUSBHLP,pfnVMGetResumeReason} */
+static DECLCALLBACK(VMRESUMEREASON) pdmR3UsbHlp_VMGetResumeReason(PPDMUSBINS pUsbIns)
+{
+ PDMUSB_ASSERT_USBINS(pUsbIns);
+ PVM pVM = pUsbIns->Internal.s.pVM;
+ VM_ASSERT_EMT(pVM);
+ VMRESUMEREASON enmReason = VMR3GetResumeReason(pVM->pUVM);
+ LogFlow(("pdmR3UsbHlp_VMGetResumeReason: caller='%s'/%d: returns %d\n",
+ pUsbIns->pReg->szName, pUsbIns->iInstance, enmReason));
+ return enmReason;
+}
+
+
/**
* The USB device helper structure.
*/
@@ -1463,6 +1590,18 @@ const PDMUSBHLP g_pdmR3UsbHlp =
pdmR3UsbHlp_ThreadCreate,
pdmR3UsbHlp_SetAsyncNotification,
pdmR3UsbHlp_AsyncNotificationCompleted,
+ pdmR3UsbHlp_VMGetSuspendReason,
+ pdmR3UsbHlp_VMGetResumeReason,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
PDM_USBHLP_VERSION
};
diff --git a/src/VBox/VMM/VMMR3/PGM.cpp b/src/VBox/VMM/VMMR3/PGM.cpp
index b0487c62..126a06d1 100644
--- a/src/VBox/VMM/VMMR3/PGM.cpp
+++ b/src/VBox/VMM/VMMR3/PGM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -81,6 +81,12 @@
* code to do the work. All memory use for those page tables is located below
* 4GB (this includes page tables for guest context mappings).
*
+ * Note! The intermediate memory context is also used for 64-bit guest
+ * execution on 32-bit hosts. Because we need to load 64-bit registers
+ * prior to switching to guest context, we need to be in 64-bit mode
+ * first. So, HM has some 64-bit worker routines in VMMRC.rc that get
+ * invoked via the special world switcher code in LegacyToAMD64.asm.
+ *
*
* @subsection subsec_pgm_int_gc Guest Context Mappings
*
@@ -621,9 +627,10 @@
#endif
#include <VBox/vmm/selm.h>
#include <VBox/vmm/ssm.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include "PGMInline.h"
#include <VBox/dbg.h>
@@ -652,22 +659,20 @@ static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, v
static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
static DECLCALLBACK(int) pgmR3RelocateHyperVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
#ifdef VBOX_STRICT
-static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
+static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
#endif
static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
static void pgmR3ModeDataSwitch(PVM pVM, PVMCPU pVCpu, PGMMODE enmShw, PGMMODE enmGst);
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
#ifdef VBOX_WITH_DEBUGGER
-/** @todo Convert the first two commands to 'info' items. */
-static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
-static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD pgmR3CmdError;
+static FNDBGCCMD pgmR3CmdSync;
+static FNDBGCCMD pgmR3CmdSyncAlways;
# ifdef VBOX_STRICT
-static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD pgmR3CmdAssertCR3;
# endif
-static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD pgmR3CmdPhysToFile;
#endif
@@ -702,7 +707,6 @@ static const DBGCVARDESC g_aPgmCountPhysWritesArgs[] =
static const DBGCCMD g_aCmds[] =
{
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
- { "pgmram", 0, 0, NULL, 0, 0, pgmR3CmdRam, "", "Display the ram ranges." },
{ "pgmsync", 0, 0, NULL, 0, 0, pgmR3CmdSync, "", "Sync the CR3 page." },
{ "pgmerror", 0, 1, &g_aPgmErrorArgs[0], 1, 0, pgmR3CmdError, "", "Enables inject runtime of errors into parts of PGM." },
{ "pgmerroroff", 0, 1, &g_aPgmErrorArgs[0], 1, 0, pgmR3CmdError, "", "Disables inject runtime errors into parts of PGM." },
@@ -1227,9 +1231,6 @@ VMMR3DECL(int) PGMR3Init(PVM pVM)
/*
* Init the structure.
*/
-#ifdef PGM_WITHOUT_MAPPINGS
- pVM->pgm.s.fMappingsDisabled = true;
-#endif
pVM->pgm.s.offVM = RT_OFFSETOF(VM, pgm.s);
pVM->pgm.s.offVCpuPGM = RT_OFFSETOF(VMCPU, pgm.s);
@@ -1368,7 +1369,7 @@ VMMR3DECL(int) PGMR3Init(PVM pVM)
* Register callbacks, string formatters and the saved state data unit.
*/
#ifdef VBOX_STRICT
- VMR3AtStateRegister(pVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
+ VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif
PGMRegisterStringFormatTypes();
@@ -1890,6 +1891,8 @@ static int pgmR3InitStats(PVM pVM)
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndObs, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncObsHnd", "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2SyncPT, "/PGM/CPU%u/RZ/Trap0e/Time2/SyncPT", "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2WPEmulation, "/PGM/CPU%u/RZ/Trap0e/Time2/WPEmulation", "Profiling of the Trap0eHandler body when the cause is CR0.WP emulation.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsHack, "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USHack", "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be enabled.");
+ PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsUnhack, "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USUnhack", "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eConflicts, "/PGM/CPU%u/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersMapping, "/PGM/CPU%u/RZ/Trap0e/Handlers/Mapping", "Number of traps due to access handlers in mappings.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages.");
@@ -2164,6 +2167,7 @@ VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f0000000000);
}
+ /** @todo query from CPUM. */
pVM->pgm.s.GCPhysInvAddrMask = 0;
for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
pVM->pgm.s.GCPhysInvAddrMask |= RT_BIT_64(iBit);
@@ -2238,13 +2242,13 @@ VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
switch (enmWhat)
{
- case VMINITCOMPLETED_HWACCM:
+ case VMINITCOMPLETED_HM:
#ifdef VBOX_WITH_PCI_PASSTHROUGH
if (pVM->pgm.s.fPciPassthrough)
{
AssertLogRelReturn(pVM->pgm.s.fRamPreAlloc, VERR_PCI_PASSTHROUGH_NO_RAM_PREALLOC);
- AssertLogRelReturn(HWACCMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HWACCM);
- AssertLogRelReturn(HWACCMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);
+ AssertLogRelReturn(HMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HM);
+ AssertLogRelReturn(HMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);
/*
* Report assignments to the IOMMU (hope that's good enough for now).
@@ -2376,7 +2380,7 @@ VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
*/
pVM->pgm.s.pvZeroPgR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pvZeroPgR3);
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- AssertRelease(pVM->pgm.s.pvZeroPgR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
+ AssertRelease(pVM->pgm.s.pvZeroPgR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
#else
AssertRelease(pVM->pgm.s.pvZeroPgR0 != NIL_RTR0PTR);
#endif
@@ -2470,7 +2474,7 @@ static DECLCALLBACK(int) pgmR3RelocateHyperVirtHandler(PAVLROGCPTRNODECORE pNode
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3DECL(void) PGMR3ResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu)
+VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
{
int rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
AssertRC(rc);
@@ -2504,10 +2508,8 @@ VMMR3DECL(void) PGMR3ResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(void) PGMR3Reset(PVM pVM)
+VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
{
- int rc;
-
LogFlow(("PGMR3Reset:\n"));
VM_ASSERT_EMT(pVM);
@@ -2528,13 +2530,13 @@ VMMR3DECL(void) PGMR3Reset(PVM pVM)
for (VMCPUID i = 0; i < pVM->cCpus; i++)
{
PVMCPU pVCpu = &pVM->aCpus[i];
- rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
- AssertRC(rc);
+ int rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
+ AssertReleaseRC(rc);
}
#ifdef DEBUG
- DBGFR3InfoLog(pVM, "mappings", NULL);
- DBGFR3InfoLog(pVM, "handlers", "all nostat");
+ DBGFR3_INFO_LOG(pVM, "mappings", NULL);
+ DBGFR3_INFO_LOG(pVM, "handlers", "all nostat");
#endif
/*
@@ -2544,8 +2546,8 @@ VMMR3DECL(void) PGMR3Reset(PVM pVM)
{
PVMCPU pVCpu = &pVM->aCpus[i];
- rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
- AssertRC(rc);
+ int rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
+ AssertReleaseRC(rc);
STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cGuestModeChanges);
STAM_REL_COUNTER_RESET(&pVCpu->pgm.s.cA20Changes);
@@ -2577,21 +2579,36 @@ VMMR3DECL(void) PGMR3Reset(PVM pVM)
pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
pgmR3RefreshShadowModeAfterA20Change(pVCpu);
- HWACCMFlushTLB(pVCpu);
+ HMFlushTLB(pVCpu);
#endif
}
}
- /*
- * Reset (zero) RAM and shadow ROM pages.
- */
- rc = pgmR3PhysRamReset(pVM);
- if (RT_SUCCESS(rc))
- rc = pgmR3PhysRomReset(pVM);
+ pgmUnlock(pVM);
+}
- pgmUnlock(pVM);
- AssertReleaseRC(rc);
+/**
+ * Memory setup after VM construction or reset.
+ *
+ * @param pVM Pointer to the VM.
+ * @param fAtReset Indicates the context, after reset if @c true or after
+ * construction if @c false.
+ */
+VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
+{
+ if (fAtReset)
+ {
+ pgmLock(pVM);
+
+ int rc = pgmR3PhysRamZeroAll(pVM);
+ AssertReleaseRC(rc);
+
+ rc = pgmR3PhysRomReset(pVM);
+ AssertReleaseRC(rc);
+
+ pgmUnlock(pVM);
+ }
}
@@ -2600,11 +2617,11 @@ VMMR3DECL(void) PGMR3Reset(PVM pVM)
* VM state change callback for clearing fNoMorePhysWrites after
* a snapshot has been created.
*/
-static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
+static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
if ( enmState == VMSTATE_RUNNING
|| enmState == VMSTATE_RESUMING)
- pVM->pgm.s.fNoMorePhysWrites = false;
+ pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
NOREF(enmOldState); NOREF(pvUser);
}
#endif
@@ -2719,6 +2736,7 @@ static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char
pCur->pszDesc);
}
+
/**
* Dump the page directory to the log.
*
@@ -3172,7 +3190,7 @@ static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
if ( enmShadowMode != PGMMODE_INVALID
- && !HWACCMIsEnabled(pVM) /* always switch in hwaccm mode! */)
+ && !HMIsEnabled(pVM) /* always switch in hm mode! */)
break; /* (no change) */
switch (enmHostMode)
@@ -3327,9 +3345,9 @@ static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE
return PGMMODE_INVALID;
}
/* Override the shadow mode is nested paging is active. */
- pVM->pgm.s.fNestedPaging = HWACCMIsNestedPagingActive(pVM);
+ pVM->pgm.s.fNestedPaging = HMIsNestedPagingActive(pVM);
if (pVM->pgm.s.fNestedPaging)
- enmShadowMode = HWACCMGetShwPagingMode(pVM);
+ enmShadowMode = HMGetShwPagingMode(pVM);
*penmSwitcher = enmSwitcher;
return enmShadowMode;
@@ -3366,7 +3384,8 @@ VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
#ifdef VBOX_WITH_RAW_MODE
- if (enmSwitcher != VMMSWITCHER_INVALID)
+ if ( enmSwitcher != VMMSWITCHER_INVALID
+ && !HMIsEnabled(pVM))
{
/*
* Select new switcher.
@@ -3561,7 +3580,7 @@ VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
- N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));
+ N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
rc = PGM_GST_NAME_PAE(Enter)(pVCpu, GCPhysCR3);
@@ -3628,8 +3647,8 @@ VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
rc = VINF_SUCCESS;
}
- /* Notify HWACCM as well. */
- HWACCMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
+ /* Notify HM as well. */
+ HMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
return rc;
}
@@ -3665,7 +3684,7 @@ int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
{
pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
int rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
- Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
AssertRCReturn(rc, rc);
AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
@@ -3695,75 +3714,27 @@ void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
#ifdef VBOX_WITH_DEBUGGER
/**
- * The '.pgmram' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmerror' and '.pgmerroroff' commands.}
*/
-static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
- if (!pVM->pgm.s.pRamRangesXR3)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no Ram is registered.\n");
-
- /*
- * Dump the ranges.
- */
- int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "From - To (incl) pvHC\n");
- PPGMRAMRANGE pRam;
- for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
- {
- rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
- "%RGp - %RGp %p\n",
- pRam->GCPhys, pRam->GCPhysLast, pRam->pvR3);
- if (RT_FAILURE(rc))
- return rc;
- }
-
- return VINF_SUCCESS;
-}
-
-
-/**
- * The '.pgmerror' and '.pgmerroroff' commands.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
- */
-static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
-{
- /*
- * Validate input.
- */
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
- AssertReturn(cArgs == 0 || (cArgs == 1 && paArgs[0].enmType == DBGCVAR_TYPE_STRING),
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Hit bug in the parser.\n"));
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, cArgs == 0 || (cArgs == 1 && paArgs[0].enmType == DBGCVAR_TYPE_STRING));
if (!cArgs)
{
/*
* Print the list of error injection locations with status.
*/
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "PGM error inject locations:\n");
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, " handy - %RTbool\n", pVM->pgm.s.fErrInjHandyPages);
+ DBGCCmdHlpPrintf(pCmdHlp, "PGM error inject locations:\n");
+ DBGCCmdHlpPrintf(pCmdHlp, " handy - %RTbool\n", pVM->pgm.s.fErrInjHandyPages);
}
else
{
-
/*
* String switch on where to inject the error.
*/
@@ -3772,150 +3743,138 @@ static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM
if (!strcmp(pszWhere, "handy"))
ASMAtomicWriteBool(&pVM->pgm.s.fErrInjHandyPages, fNewState);
else
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 'where' value: %s.\n", pszWhere);
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "done\n");
+ return DBGCCmdHlpPrintf(pCmdHlp, "error: Invalid 'where' value: %s.\n", pszWhere);
+ DBGCCmdHlpPrintf(pCmdHlp, "done\n");
}
return VINF_SUCCESS;
}
/**
- * The '.pgmsync' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsync' command.}
*/
-static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
- /** @todo SMP support */
-
/*
* Validate input.
*/
NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
-
- PVMCPU pVCpu = &pVM->aCpus[0];
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp));
+ if (!pVCpu)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid CPU ID");
/*
* Force page directory sync.
*/
VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
- int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Forcing page directory sync.\n");
+ int rc = DBGCCmdHlpPrintf(pCmdHlp, "Forcing page directory sync.\n");
if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
-
#ifdef VBOX_STRICT
+
/**
- * The '.pgmassertcr3' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * EMT callback for pgmR3CmdAssertCR3.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param pcErrors Where to return the error count.
*/
-static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3CmdAssertCR3EmtWorker(PUVM pUVM, unsigned *pcErrors)
{
- /** @todo SMP support!! */
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ PVMCPU pVCpu = VMMGetCpu(pVM);
+
+ *pcErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNDBGCCMD, The '.pgmassertcr3' command.}
+ */
+static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+{
/*
* Validate input.
*/
NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
-
- PVMCPU pVCpu = &pVM->aCpus[0];
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
- int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Checking shadow CR3 page tables for consistency.\n");
+ int rc = DBGCCmdHlpPrintf(pCmdHlp, "Checking shadow CR3 page tables for consistency.\n");
if (RT_FAILURE(rc))
return rc;
- PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
-
- return VINF_SUCCESS;
+ unsigned cErrors = 0;
+ rc = VMR3ReqCallWaitU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp), (PFNRT)pgmR3CmdAssertCR3EmtWorker, 2, pUVM, &cErrors);
+ if (RT_FAILURE(rc))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "VMR3ReqCallWaitU failed: %Rrc", rc);
+ if (cErrors > 0)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "PGMAssertCR3: %u error(s)", cErrors);
+ return DBGCCmdHlpPrintf(pCmdHlp, "PGMAssertCR3: OK\n");
}
-#endif /* VBOX_STRICT */
+#endif /* VBOX_STRICT */
/**
- * The '.pgmsyncalways' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsyncalways' command.}
*/
-static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
- /** @todo SMP support!! */
- PVMCPU pVCpu = &pVM->aCpus[0];
-
/*
* Validate input.
*/
NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, DBGCCmdHlpGetCurrentCpu(pCmdHlp));
+ if (!pVCpu)
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid CPU ID");
/*
* Force page directory sync.
*/
+ int rc;
if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS)
{
ASMAtomicAndU32(&pVCpu->pgm.s.fSyncFlags, ~PGM_SYNC_ALWAYS);
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Disabled permanent forced page directory syncing.\n");
+ rc = DBGCCmdHlpPrintf(pCmdHlp, "Disabled permanent forced page directory syncing.\n");
}
else
{
ASMAtomicOrU32(&pVCpu->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Enabled permanent forced page directory syncing.\n");
+ rc = DBGCCmdHlpPrintf(pCmdHlp, "Enabled permanent forced page directory syncing.\n");
}
+ return rc;
}
/**
- * The '.pgmphystofile' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmphystofile' command.}
*/
-static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
NOREF(pCmd);
- if (!pVM)
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
- if ( cArgs < 1
- || cArgs > 2
- || paArgs[0].enmType != DBGCVAR_TYPE_STRING
- || ( cArgs > 1
- && paArgs[1].enmType != DBGCVAR_TYPE_STRING))
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: parser error, invalid arguments.\n");
- if ( cArgs >= 2
- && strcmp(paArgs[1].u.pszString, "nozero"))
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, cArgs == 1 || cArgs == 2);
+    DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 0, paArgs[0].enmType == DBGCVAR_TYPE_STRING);
+ if (cArgs == 2)
+ {
+        DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, 1, paArgs[1].enmType == DBGCVAR_TYPE_STRING);
+ if (strcmp(paArgs[1].u.pszString, "nozero"))
+ return DBGCCmdHlpFail(pCmdHlp, pCmd, "Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
+ }
bool fIncZeroPgs = cArgs < 2;
/*
@@ -3924,12 +3883,12 @@ static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp,
RTFILE hFile;
int rc = RTFileOpen(&hFile, paArgs[0].u.pszString, RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE);
if (RT_FAILURE(rc))
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
+ return DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
uint32_t cbRamHole = 0;
- CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
+ CFGMR3QueryU32Def(CFGMR3GetRootU(pUVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
uint64_t cbRam = 0;
- CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
+ CFGMR3QueryU64Def(CFGMR3GetRootU(pUVM), "RamSize", &cbRam, 0);
RTGCPHYS GCPhysEnd = cbRam + cbRamHole;
/*
@@ -3964,7 +3923,7 @@ static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp,
{
rc = RTFileWrite(hFile, abZeroPg, PAGE_SIZE, NULL);
if (RT_FAILURE(rc))
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
}
}
else
@@ -3984,22 +3943,23 @@ static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp,
rc = RTFileWrite(hFile, pvPage, PAGE_SIZE, NULL);
PGMPhysReleasePageMappingLock(pVM, &Lock);
if (RT_FAILURE(rc))
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
}
else
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ DBGCCmdHlpPrintf(pCmdHlp, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
break;
}
default:
AssertFailed();
- case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
case PGMPAGETYPE_MMIO:
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
if (fIncZeroPgs)
{
rc = RTFileWrite(hFile, abZeroPg, PAGE_SIZE, NULL);
if (RT_FAILURE(rc))
- pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
+ DBGCCmdHlpPrintf(pCmdHlp, "error: RTFileWrite -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
}
break;
}
@@ -4015,7 +3975,7 @@ static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp,
RTFileClose(hFile);
if (RT_SUCCESS(rc))
- return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
+ return DBGCCmdHlpPrintf(pCmdHlp, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
return VINF_SUCCESS;
}
diff --git a/src/VBox/VMM/VMMR3/PGMBth.h b/src/VBox/VMM/VMMR3/PGMBth.h
index 2a8bbe46..969cfc06 100644
--- a/src/VBox/VMM/VMMR3/PGMBth.h
+++ b/src/VBox/VMM/VMMR3/PGMBth.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -64,27 +64,30 @@ PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR
{
int rc;
+ if (!HMIsEnabled(pVM))
+ {
#if PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
- /* GC */
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(Trap0eHandler), &pModeData->pfnRCBthTrap0eHandler);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(Trap0eHandler), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(InvalidatePage), &pModeData->pfnRCBthInvalidatePage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncCR3), &pModeData->pfnRCBthSyncCR3);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(PrefetchPage), &pModeData->pfnRCBthPrefetchPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage),&pModeData->pfnRCBthVerifyAccessSyncPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), rc), rc);
+ /* RC */
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(Trap0eHandler), &pModeData->pfnRCBthTrap0eHandler);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(Trap0eHandler), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(InvalidatePage), &pModeData->pfnRCBthInvalidatePage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncCR3), &pModeData->pfnRCBthSyncCR3);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(PrefetchPage), &pModeData->pfnRCBthPrefetchPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage),&pModeData->pfnRCBthVerifyAccessSyncPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), rc), rc);
# ifdef VBOX_STRICT
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(AssertCR3), &pModeData->pfnRCBthAssertCR3);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(AssertCR3), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(AssertCR3), &pModeData->pfnRCBthAssertCR3);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(AssertCR3), rc), rc);
# endif
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(MapCR3), &pModeData->pfnRCBthMapCR3);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(MapCR3), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(UnmapCR3), &pModeData->pfnRCBthUnmapCR3);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(UnmapCR3), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(MapCR3), &pModeData->pfnRCBthMapCR3);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(MapCR3), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(UnmapCR3), &pModeData->pfnRCBthUnmapCR3);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(UnmapCR3), rc), rc);
#endif /* Not AMD64 shadow paging. */
+ }
/* Ring 0 */
rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(Trap0eHandler), &pModeData->pfnR0BthTrap0eHandler);
@@ -131,7 +134,7 @@ PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
PVM pVM = pVCpu->pVMR3;
- Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
+ Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
Assert(!pVM->pgm.s.fNestedPaging);
pgmLock(pVM);
@@ -147,28 +150,26 @@ PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
/* Mark the page as unlocked; allow flushing again. */
pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+# ifndef PGM_WITHOUT_MAPPINGS
/* Remove the hypervisor mappings from the shadow page table. */
pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+# endif
- pgmPoolFreeByPage(pPool, pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
+ pgmPoolFreeByPage(pPool, pVCpu->pgm.s.pShwPageCR3R3, NIL_PGMPOOL_IDX, UINT32_MAX);
pVCpu->pgm.s.pShwPageCR3R3 = 0;
pVCpu->pgm.s.pShwPageCR3RC = 0;
pVCpu->pgm.s.pShwPageCR3R0 = 0;
- pVCpu->pgm.s.iShwUser = 0;
- pVCpu->pgm.s.iShwUserTable = 0;
}
/* construct a fake address. */
GCPhysCR3 = RT_BIT_64(63);
- pVCpu->pgm.s.iShwUser = SHW_POOL_ROOT_IDX;
- pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
int rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
- pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable, false /*fLockPage*/,
+ NIL_PGMPOOL_IDX, UINT32_MAX, false /*fLockPage*/,
&pVCpu->pgm.s.pShwPageCR3R3);
if (rc == VERR_PGM_POOL_FLUSHED)
{
Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
- Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+ Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
pgmUnlock(pVM);
return VINF_PGM_SYNC_CR3;
}
@@ -183,8 +184,11 @@ PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
/* Set the current hypervisor CR3. */
CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu));
+# ifndef PGM_WITHOUT_MAPPINGS
/* Apply all hypervisor mappings to the new CR3. */
rc = pgmMapActivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+# endif
+
pgmUnlock(pVM);
return rc;
#else
diff --git a/src/VBox/VMM/VMMR3/PGMDbg.cpp b/src/VBox/VMM/VMMR3/PGMDbg.cpp
index 4b14520a..da9f7eda 100644
--- a/src/VBox/VMM/VMMR3/PGMDbg.cpp
+++ b/src/VBox/VMM/VMMR3/PGMDbg.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -23,6 +23,7 @@
#include <VBox/vmm/stam.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm.h>
@@ -102,13 +103,13 @@ typedef PGMR3DUMPHIERARCHYSTATE *PPGMR3DUMPHIERARCHYSTATE;
* @retval VINF_SUCCESS on success, *pGCPhys is set.
* @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param R3Ptr The R3 pointer to convert.
* @param pGCPhys Where to store the GC physical address on success.
*/
-VMMR3DECL(int) PGMR3DbgR3Ptr2GCPhys(PVM pVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys)
+VMMR3DECL(int) PGMR3DbgR3Ptr2GCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys)
{
- NOREF(pVM); NOREF(R3Ptr);
+ NOREF(pUVM); NOREF(R3Ptr);
*pGCPhys = NIL_RTGCPHYS;
return VERR_NOT_IMPLEMENTED;
}
@@ -124,13 +125,13 @@ VMMR3DECL(int) PGMR3DbgR3Ptr2GCPhys(PVM pVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys)
* @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical page but has no physical backing.
* @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param R3Ptr The R3 pointer to convert.
* @param pHCPhys Where to store the HC physical address on success.
*/
-VMMR3DECL(int) PGMR3DbgR3Ptr2HCPhys(PVM pVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys)
+VMMR3DECL(int) PGMR3DbgR3Ptr2HCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys)
{
- NOREF(pVM); NOREF(R3Ptr);
+ NOREF(pUVM); NOREF(R3Ptr);
*pHCPhys = NIL_RTHCPHYS;
return VERR_NOT_IMPLEMENTED;
}
@@ -145,12 +146,15 @@ VMMR3DECL(int) PGMR3DbgR3Ptr2HCPhys(PVM pVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys)
* @retval VINF_SUCCESS on success, *pGCPhys is set.
* @retval VERR_INVALID_POINTER if the HC physical address is not within the GC physical memory.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param HCPhys The HC physical address to convert.
* @param pGCPhys Where to store the GC physical address on success.
*/
-VMMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys)
+VMMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PUVM pUVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
/*
* Validate and adjust the input a bit.
*/
@@ -161,7 +165,7 @@ VMMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys
if (HCPhys == 0)
return VERR_INVALID_POINTER;
- for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
+ for (PPGMRAMRANGE pRam = pUVM->pVM->pgm.s.CTX_SUFF(pRamRangesX);
pRam;
pRam = pRam->CTX_SUFF(pNext))
{
@@ -192,7 +196,7 @@ VMMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys
* partial reads are unwanted.
* @todo Unused?
*/
-VMMR3DECL(int) PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
+VMMR3_INT_DECL(int) PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
{
/* validate */
AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
@@ -243,7 +247,7 @@ VMMR3DECL(int) PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size
* if partial writes are unwanted.
* @todo Unused?
*/
-VMMR3DECL(int) PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
+VMMR3_INT_DECL(int) PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
{
/* validate */
AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
@@ -294,7 +298,7 @@ VMMR3DECL(int) PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSr
* partial reads are unwanted.
* @todo Unused?
*/
-VMMR3DECL(int) PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
+VMMR3_INT_DECL(int) PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, uint32_t fFlags, size_t *pcbRead)
{
/* validate */
AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
@@ -350,7 +354,7 @@ VMMR3DECL(int) PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t
* if partial writes are unwanted.
* @todo Unused?
*/
-VMMR3DECL(int) PGMR3DbgWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
+VMMR3_INT_DECL(int) PGMR3DbgWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten)
{
/* validate */
AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
@@ -569,8 +573,8 @@ static bool pgmR3DbgScanPage(const uint8_t *pbPage, int32_t *poff, uint32_t cb,
* @param cbNeedle The length of the byte string. Max 256 bytes.
* @param pGCPhysHit Where to store the address of the first occurrence on success.
*/
-VMMR3DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange, RTGCPHYS GCPhysAlign,
- const uint8_t *pabNeedle, size_t cbNeedle, PRTGCPHYS pGCPhysHit)
+VMMR3_INT_DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange, RTGCPHYS GCPhysAlign,
+ const uint8_t *pabNeedle, size_t cbNeedle, PRTGCPHYS pGCPhysHit)
{
/*
* Validate and adjust the input a bit.
@@ -654,10 +658,10 @@ VMMR3DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange,
for (;; offPage = 0)
{
PPGMPAGE pPage = &pRam->aPages[iPage];
- if ( ( !PGM_PAGE_IS_ZERO(pPage)
- || fAllZero)
- && !PGM_PAGE_IS_BALLOONED(pPage)
- && !PGM_PAGE_IS_MMIO(pPage))
+ if ( ( !PGM_PAGE_IS_ZERO(pPage)
+ || fAllZero)
+ && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
{
void const *pvPage;
PGMPAGEMAPLOCK Lock;
@@ -729,8 +733,8 @@ VMMR3DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange,
* @param cbNeedle The length of the byte string.
* @param pGCPtrHit Where to store the address of the first occurrence on success.
*/
-VMMR3DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR cbRange, RTGCPTR GCPtrAlign,
- const uint8_t *pabNeedle, size_t cbNeedle, PRTGCUINTPTR pGCPtrHit)
+VMMR3_INT_DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR cbRange, RTGCPTR GCPtrAlign,
+ const uint8_t *pabNeedle, size_t cbNeedle, PRTGCUINTPTR pGCPtrHit)
{
VMCPU_ASSERT_EMT(pVCpu);
@@ -770,11 +774,15 @@ VMMR3DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR
cbRange -= Adj;
}
+ /* Only paged protected mode or long mode here, use the physical scan for
+ the other modes. */
+ PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+ AssertReturn(PGMMODE_WITH_PAGING(enmMode), VERR_PGM_NOT_USED_IN_MODE);
+
/*
* Search the memory - ignore MMIO, zero and not-present pages.
*/
const bool fAllZero = ASMMemIsAll8(pabNeedle, cbNeedle, 0) == NULL;
- PGMMODE enmMode = PGMGetGuestMode(pVCpu);
RTGCPTR GCPtrMask = PGMMODE_IS_LONG_MODE(enmMode) ? UINT64_MAX : UINT32_MAX;
uint8_t abPrev[MAX_NEEDLE_SIZE];
size_t cbPrev = 0;
@@ -789,20 +797,20 @@ VMMR3DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR
GCPtr &= ~(RTGCPTR)PAGE_OFFSET_MASK;
for (;; offPage = 0)
{
- RTGCPHYS GCPhys;
- int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
- if (RT_SUCCESS(rc))
+ PGMPTWALKGST Walk;
+ int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
+ if (RT_SUCCESS(rc) && Walk.u.Core.fSucceeded)
{
- PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
- if ( pPage
- && ( !PGM_PAGE_IS_ZERO(pPage)
- || fAllZero)
- && !PGM_PAGE_IS_BALLOONED(pPage)
- && !PGM_PAGE_IS_MMIO(pPage))
+ PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.u.Core.GCPhys);
+ if ( pPage
+ && ( !PGM_PAGE_IS_ZERO(pPage)
+ || fAllZero)
+ && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+ && !PGM_PAGE_IS_BALLOONED(pPage))
{
void const *pvPage;
PGMPAGEMAPLOCK Lock;
- rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
+ rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.u.Core.GCPhys, &pvPage, &Lock);
if (RT_SUCCESS(rc))
{
int32_t offHit = offPage;
@@ -832,13 +840,70 @@ VMMR3DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR
cbPrev = 0;
}
else
+ {
+ Assert(Walk.enmType != PGMPTWALKGSTTYPE_INVALID);
+ Assert(!Walk.u.Core.fSucceeded);
cbPrev = 0; /* ignore error. */
+ /*
+ * Try skip as much as possible. No need to figure out that a PDE
+ * is not present 512 times!
+ */
+ uint64_t cPagesCanSkip;
+ switch (Walk.u.Core.uLevel)
+ {
+ case 1:
+ /* page level, use cIncPages */
+ cPagesCanSkip = 1;
+ break;
+ case 2:
+ if (Walk.enmType == PGMPTWALKGSTTYPE_32BIT)
+ {
+ cPagesCanSkip = X86_PG_ENTRIES - ((GCPtr >> X86_PT_SHIFT) & X86_PT_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PD_SHIFT) - 1)));
+ }
+ else
+ {
+ cPagesCanSkip = X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PD_PAE_SHIFT) - 1)));
+ }
+ break;
+ case 3:
+ cPagesCanSkip = (X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)) * X86_PG_PAE_ENTRIES
+ - ((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PDPT_SHIFT) - 1)));
+ break;
+ case 4:
+ cPagesCanSkip = (X86_PG_PAE_ENTRIES - ((GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64))
+ * X86_PG_PAE_ENTRIES * X86_PG_PAE_ENTRIES
+ - ((((GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)) * X86_PG_PAE_ENTRIES)
+ - (( GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
+ Assert(!((GCPtr + ((RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT)) & (RT_BIT_64(X86_PML4_SHIFT) - 1)));
+ break;
+ case 8:
+ /* The CR3 value is bad, forget the whole search. */
+ cPagesCanSkip = cPages;
+ break;
+ default:
+ AssertMsgFailed(("%d\n", Walk.u.Core.uLevel));
+ cPagesCanSkip = 0;
+ break;
+ }
+ if (cPages <= cPagesCanSkip)
+ break;
+ if (cPagesCanSkip >= cIncPages)
+ {
+ cPages -= cPagesCanSkip;
+ GCPtr += (RTGCPTR)cPagesCanSkip << X86_PT_PAE_SHIFT;
+ continue;
+ }
+ }
+
/* advance to the next page. */
if (cPages <= cIncPages)
break;
cPages -= cIncPages;
- GCPtr += (RTGCPTR)cIncPages << PAGE_SHIFT;
+ GCPtr += (RTGCPTR)cIncPages << X86_PT_PAE_SHIFT;
}
return VERR_DBGF_MEM_NOT_FOUND;
}
@@ -1037,7 +1102,7 @@ static void pgmR3DumpHierarchyShwGuestPageInfo(PPGMR3DUMPHIERARCHYSTATE pState,
{
char szPage[80];
RTGCPHYS GCPhys;
- int rc = PGMR3DbgHCPhys2GCPhys(pState->pVM, HCPhys, &GCPhys);
+ int rc = PGMR3DbgHCPhys2GCPhys(pState->pVM->pUVM, HCPhys, &GCPhys);
if (RT_SUCCESS(rc))
{
pgmLock(pState->pVM);
@@ -1680,7 +1745,7 @@ VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fL
if (fLongMode)
fFlags |= DBGFPGDMP_FLAGS_LME;
- return DBGFR3PagingDumpEx(pVM, pVCpu->idCpu, fFlags, cr3, 0, fLongMode ? UINT64_MAX : UINT32_MAX, cMaxDepth, pHlp);
+ return DBGFR3PagingDumpEx(pVM->pUVM, pVCpu->idCpu, fFlags, cr3, 0, fLongMode ? UINT64_MAX : UINT32_MAX, cMaxDepth, pHlp);
}
diff --git a/src/VBox/VMM/VMMR3/PGMGst.h b/src/VBox/VMM/VMMR3/PGMGst.h
index a5bfe728..64e07470 100644
--- a/src/VBox/VMM/VMMR3/PGMGst.h
+++ b/src/VBox/VMM/VMMR3/PGMGst.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -57,15 +57,18 @@ PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR
{
int rc;
+ if (!HMIsEnabled(pVM))
+ {
#if PGM_SHW_TYPE != PGM_TYPE_AMD64 /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
- /* GC */
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPage), &pModeData->pfnRCGstGetPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPage), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(ModifyPage), &pModeData->pfnRCGstModifyPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(ModifyPage), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPDE), &pModeData->pfnRCGstGetPDE);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
+ /* RC */
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPage), &pModeData->pfnRCGstGetPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPage), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(ModifyPage), &pModeData->pfnRCGstModifyPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(ModifyPage), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPDE), &pModeData->pfnRCGstGetPDE);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
#endif /* Not AMD64 shadow paging. */
+ }
/* Ring-0 */
rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPage), &pModeData->pfnR0GstGetPage);
diff --git a/src/VBox/VMM/VMMR3/PGMHandler.cpp b/src/VBox/VMM/VMMR3/PGMHandler.cpp
index 5de793d9..bfffa970 100644
--- a/src/VBox/VMM/VMMR3/PGMHandler.cpp
+++ b/src/VBox/VMM/VMMR3/PGMHandler.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -51,7 +51,7 @@
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
/*******************************************************************************
@@ -119,7 +119,8 @@ VMMR3DECL(int) PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType,
* Resolve the GC handler.
*/
RTRCPTR pfnHandlerRC = NIL_RTRCPTR;
- rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
+ if (!HMIsEnabled(pVM))
+ rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
if (RT_SUCCESS(rc))
return PGMHandlerPhysicalRegisterEx(pVM, enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3,
pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, pszDesc);
@@ -239,7 +240,7 @@ VMMR3DECL(int) PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType,
enmType, GCPtr, GCPtrLast, pszHandlerRC, pszHandlerRC, pszModRC, pszModRC, pszDesc));
/* Not supported/relevant for VT-x and AMD-V. */
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
return VERR_NOT_IMPLEMENTED;
/*
@@ -259,7 +260,8 @@ VMMR3DECL(int) PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType,
RTRCPTR pfnHandlerRC;
int rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
if (RT_SUCCESS(rc))
- return PGMR3HandlerVirtualRegisterEx(pVM, enmType, GCPtr, GCPtrLast, pfnInvalidateR3, pfnHandlerR3, pfnHandlerRC, pszDesc);
+ return PGMR3HandlerVirtualRegisterEx(pVM, enmType, GCPtr, GCPtrLast, pfnInvalidateR3,
+ pfnHandlerR3, pfnHandlerRC, pszDesc);
AssertMsgFailed(("Failed to resolve %s.%s, rc=%Rrc.\n", pszModRC, pszHandlerRC, rc));
return rc;
@@ -292,7 +294,7 @@ VMMDECL(int) PGMR3HandlerVirtualRegisterEx(PVM pVM, PGMVIRTHANDLERTYPE enmType,
enmType, GCPtr, GCPtrLast, pfnInvalidateR3, pfnHandlerR3, pfnHandlerRC, pszDesc));
/* Not supported/relevant for VT-x and AMD-V. */
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
return VERR_NOT_IMPLEMENTED;
/*
@@ -408,9 +410,8 @@ VMMDECL(int) PGMR3HandlerVirtualRegisterEx(PVM pVM, PGMVIRTHANDLERTYPE enmType,
pgmUnlock(pVM);
#ifdef VBOX_WITH_STATISTICS
- char szPath[256];
- RTStrPrintf(szPath, sizeof(szPath), "/PGM/VirtHandler/Calls/%RGv-%RGv", pNew->Core.Key, pNew->Core.KeyLast);
- rc = STAMR3Register(pVM, &pNew->Stat, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szPath, STAMUNIT_TICKS_PER_CALL, pszDesc);
+ rc = STAMR3RegisterF(pVM, &pNew->Stat, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc,
+ "/PGM/VirtHandler/Calls/%RGv-%RGv", pNew->Core.Key, pNew->Core.KeyLast);
AssertRC(rc);
#endif
return VINF_SUCCESS;
@@ -493,7 +494,9 @@ VMMDECL(int) PGMHandlerVirtualDeregister(PVM pVM, RTGCPTR GCPtr)
if (RT_UNLIKELY(!pCur))
{
pgmUnlock(pVM);
+#ifndef DEBUG_sander
AssertMsgFailed(("Range %#x not found!\n", GCPtr));
+#endif
return VERR_INVALID_PARAMETER;
}
@@ -504,7 +507,9 @@ VMMDECL(int) PGMHandlerVirtualDeregister(PVM pVM, RTGCPTR GCPtr)
pgmUnlock(pVM);
- STAM_DEREG(pVM, &pCur->Stat);
+#ifdef VBOX_WITH_STATISTICS
+ STAMR3DeregisterF(pVM->pUVM, "/PGM/VirtHandler/Calls/%RGv-%RGv", pCur->Core.Key, pCur->Core.KeyLast);
+#endif
MMHyperFree(pVM, pCur);
return VINF_SUCCESS;
@@ -580,9 +585,9 @@ DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *ps
pHlp->pfnPrintf(pHlp,
"Hypervisor Virtual handlers:\n"
"%*s %*s %*s %*s Type Description\n",
- - (int)sizeof(RTGCPTR) * 2, "From",
- - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)",
- - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC",
+ - (int)sizeof(RTGCPTR) * 2, "From",
+ - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)",
+ - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC",
- (int)sizeof(RTRCPTR) * 2 - 1, "HandlerGC");
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3InfoHandlersVirtualOne, &Args);
}
diff --git a/src/VBox/VMM/VMMR3/PGMMap.cpp b/src/VBox/VMM/VMMR3/PGMMap.cpp
index 39d2065a..d9ebe488 100644
--- a/src/VBox/VMM/VMMR3/PGMMap.cpp
+++ b/src/VBox/VMM/VMMR3/PGMMap.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -36,10 +36,15 @@
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
+#ifndef PGM_WITHOUT_MAPPINGS
static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
+#else
+# define pgmR3MapClearPDEs(pVM, pMap, iNewPDE) do { } while (0)
+# define pgmR3MapSetPDEs(pVM, pMap, iNewPDE) do { } while (0)
+#endif
/**
@@ -209,6 +214,7 @@ VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags,
return VINF_SUCCESS;
}
+#ifdef VBOX_WITH_UNUSED_CODE
/**
* Removes a page table based mapping.
@@ -280,6 +286,8 @@ VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
return VERR_INVALID_PARAMETER;
}
+#endif /* unused */
+
/**
* Checks whether a range of PDEs in the intermediate
@@ -483,8 +491,10 @@ VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
{
RTGCPTR cb = 0;
+#ifndef PGM_WITHOUT_MAPPINGS
for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
cb += pCur->cb;
+#endif
*pcb = cb;
AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
@@ -503,34 +513,34 @@ VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
*/
VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
{
- Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x (fMappingsFixed=%RTbool fMappingsDisabled=%RTbool)\n",
- GCPtrBase, cb, pVM->pgm.s.fMappingsFixed, pVM->pgm.s.fMappingsDisabled));
+ Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x (fMappingsFixed=%RTbool MappingEnabled=%RTbool)\n",
+ GCPtrBase, cb, pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM)));
- /*
- * Ignore the additions mapping fix call if disabled.
- */
- if (!pgmMapAreMappingsEnabled(pVM))
+#ifndef PGM_WITHOUT_MAPPINGS
+ if (pgmMapAreMappingsEnabled(pVM))
{
- Assert(HWACCMIsEnabled(pVM));
- return VINF_SUCCESS;
- }
+ /*
+ * Only applies to VCPU 0 as we don't support SMP guests with raw mode.
+ */
+ Assert(pVM->cCpus == 1);
+ PVMCPU pVCpu = &pVM->aCpus[0];
- /*
- * Only applies to VCPU 0 as we don't support SMP guests with raw mode.
- */
- Assert(pVM->cCpus == 1);
- PVMCPU pVCpu = &pVM->aCpus[0];
+ /*
+ * Before we do anything we'll do a forced PD sync to try make sure any
+ * pending relocations because of these mappings have been resolved.
+ */
+ PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
- /*
- * Before we do anything we'll do a forced PD sync to try make sure any
- * pending relocations because of these mappings have been resolved.
- */
- PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
+ return pgmR3MappingsFixInternal(pVM, GCPtrBase, cb);
+ }
+#endif /* !PGM_WITHOUT_MAPPINGS */
- return pgmR3MappingsFixInternal(pVM, GCPtrBase, cb);
+ Assert(HMIsEnabled(pVM));
+ return VINF_SUCCESS;
}
+#ifndef PGM_WITHOUT_MAPPINGS
/**
* Internal worker for PGMR3MappingsFix and pgmR3Load.
*
@@ -670,46 +680,7 @@ int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
}
return VINF_SUCCESS;
}
-
-
-/**
- * Interface for disabling the guest mappings when switching to HWACCM mode
- * during VM creation and VM reset.
- *
- * (This doesn't touch the intermediate table!)
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(int) PGMR3MappingsDisable(PVM pVM)
-{
- AssertReturn(!pVM->pgm.s.fMappingsFixed, VERR_PGM_MAPPINGS_FIXED);
- AssertReturn(!pVM->pgm.s.fMappingsFixedRestored, VERR_PGM_MAPPINGS_FIXED);
- if (pVM->pgm.s.fMappingsDisabled)
- return VINF_SUCCESS;
-
- /*
- * Deactivate (only applies to Virtual CPU #0).
- */
- if (pVM->aCpus[0].pgm.s.pShwPageCR3R3)
- {
- pgmLock(pVM); /* to avoid assertions */
- int rc = pgmMapDeactivateCR3(pVM, pVM->aCpus[0].pgm.s.pShwPageCR3R3);
- pgmUnlock(pVM);
- AssertRCReturn(rc, rc);
- }
-
- /*
- * Mark the mappings as disabled and trigger a CR3 re-sync.
- */
- pVM->pgm.s.fMappingsDisabled = true;
- for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
- {
- pVM->aCpus[idCpu].pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
- VMCPU_FF_SET(&pVM->aCpus[idCpu], VMCPU_FF_PGM_SYNC_CR3);
- }
- return VINF_SUCCESS;
-}
+#endif /*!PGM_WITHOUT_MAPPINGS*/
/**
@@ -724,7 +695,7 @@ VMMR3DECL(int) PGMR3MappingsDisable(PVM pVM)
*/
VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
{
- Log(("PGMR3MappingsUnfix: fMappingsFixed=%RTbool fMappingsDisabled=%RTbool\n", pVM->pgm.s.fMappingsFixed, pVM->pgm.s.fMappingsDisabled));
+ Log(("PGMR3MappingsUnfix: fMappingsFixed=%RTbool MappingsEnabled=%RTbool\n", pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM)));
if ( pgmMapAreMappingsEnabled(pVM)
&& ( pVM->pgm.s.fMappingsFixed
|| pVM->pgm.s.fMappingsFixedRestored)
@@ -757,6 +728,7 @@ VMMR3DECL(bool) PGMR3MappingsNeedReFixing(PVM pVM)
return pVM->pgm.s.fMappingsFixedRestored;
}
+#ifndef PGM_WITHOUT_MAPPINGS
/**
* Map pages into the intermediate context (switcher code).
@@ -1041,6 +1013,7 @@ static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
/* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
pPGM->pInterPD->a[iNewPDE] = Pde;
+
/*
* PAE.
*/
@@ -1344,6 +1317,7 @@ int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOl
return VERR_PGM_NO_HYPERVISOR_ADDRESS;
}
+#endif /* !PGM_WITHOUT_MAPPINGS */
/**
* Read memory from the guest mappings.
@@ -1441,7 +1415,7 @@ VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
NOREF(pszArgs);
- if (pVM->pgm.s.fMappingsDisabled)
+ if (!pgmMapAreMappingsEnabled(pVM))
pHlp->pfnPrintf(pHlp, "\nThe mappings are DISABLED.\n");
else if (pVM->pgm.s.fMappingsFixed)
pHlp->pfnPrintf(pHlp, "\nThe mappings are FIXED: %RGv-%RGv\n",
diff --git a/src/VBox/VMM/VMMR3/PGMPhys.cpp b/src/VBox/VMM/VMMR3/PGMPhys.cpp
index 9c0b51c1..9a21f28e 100644
--- a/src/VBox/VMM/VMMR3/PGMPhys.cpp
+++ b/src/VBox/VMM/VMMR3/PGMPhys.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -30,6 +30,7 @@
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include "PGMInline.h"
#include <VBox/sup.h>
#include <VBox/param.h>
@@ -142,14 +143,15 @@ VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size
* If the page has an ALL access handler, we'll have to
* delegate the job to EMT.
*/
- if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+ if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
+ || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
{
pgmUnlock(pVM);
return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 4,
pVM, &GCPhys, pvBuf, cbRead);
}
- Assert(!PGM_PAGE_IS_MMIO(pPage));
+ Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
/*
* Simple stuff, go ahead.
@@ -280,7 +282,8 @@ VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf,
* dealt with here.
*/
if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
- || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
+ || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
+ || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
{
if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
&& !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
@@ -293,7 +296,7 @@ VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf,
pVM, &GCPhys, pvBuf, cbWrite);
}
}
- Assert(!PGM_PAGE_IS_MMIO(pPage));
+ Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
/*
* Simple stuff, go ahead.
@@ -376,7 +379,7 @@ static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPh
int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
AssertFatalRC(rc2);
PPGMPAGE pPage = pTlbe->pPage;
- if (PGM_PAGE_IS_MMIO(pPage))
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
{
PGMPhysReleasePageMappingLock(pVM, pLock);
rc = VERR_PGM_PHYS_PAGE_RESERVED;
@@ -447,7 +450,7 @@ VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **pp
if (RT_SUCCESS(rc))
{
PPGMPAGE pPage = pTlbe->pPage;
- if (PGM_PAGE_IS_MMIO(pPage))
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
rc = VERR_PGM_PHYS_PAGE_RESERVED;
else
{
@@ -550,7 +553,7 @@ VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, v
PPGMPAGE pPage = pTlbe->pPage;
#if 1
/* MMIO pages doesn't have any readable backing. */
- if (PGM_PAGE_IS_MMIO(pPage))
+ if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
rc = VERR_PGM_PHYS_PAGE_RESERVED;
#else
if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
@@ -1360,7 +1363,7 @@ VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStar
* Query the amount of free memory inside VMMR0
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pcbAllocMem Where to return the amount of memory allocated
* by VMs.
* @param pcbFreeMem Where to return the amount of memory that is
@@ -1371,14 +1374,17 @@ VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStar
* @param pcbSharedMem Where to return the amount of memory that is
* currently shared.
*/
-VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
+VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
uint64_t cAllocPages = 0;
uint64_t cFreePages = 0;
uint64_t cBalloonPages = 0;
uint64_t cSharedPages = 0;
- int rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
+ int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
AssertRCReturn(rc, rc);
if (pcbAllocMem)
@@ -1403,7 +1409,7 @@ VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint6
* Query memory stats for the VM.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pcbTotalMem Where to return total amount memory the VM may
* possibly use.
* @param pcbPrivateMem Where to return the amount of private memory
@@ -1424,9 +1430,11 @@ VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint6
* potentially be shared? Doesn't this mean the first VM gets a much
* lower number of shared pages?
*/
-VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
+VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
if (pcbTotalMem)
@@ -1561,7 +1569,7 @@ static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRam
void *pvChunk = NULL;
int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- VMMIsHwVirtExtForced(pVM) ? &R0PtrChunk : NULL,
+ HMIsEnabled(pVM) ? &R0PtrChunk : NULL,
#else
NULL,
#endif
@@ -1569,7 +1577,7 @@ static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRam
if (RT_SUCCESS(rc))
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
- if (!VMMIsHwVirtExtForced(pVM))
+ if (!HMIsEnabled(pVM))
R0PtrChunk = NIL_RTR0PTR;
#else
R0PtrChunk = (uintptr_t)pvChunk;
@@ -1691,7 +1699,7 @@ VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const
*/
uint32_t cbChunk;
uint32_t cPagesPerChunk;
- if (VMMIsHwVirtExtForced(pVM))
+ if (HMIsEnabled(pVM))
{
cbChunk = 16U*_1M;
cPagesPerChunk = 1048048; /* max ~1048059 */
@@ -1878,7 +1886,7 @@ void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
/**
- * Resets (zeros) the RAM.
+ * Resets the physical memory state.
*
* ASSUMES that the caller owns the PGM lock.
*
@@ -1903,13 +1911,29 @@ int pgmR3PhysRamReset(PVM pVM)
pVM->pgm.s.cReusedSharedPages = 0;
pVM->pgm.s.cBalloonedPages = 0;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets (zeros) the RAM after all devices and components have been reset.
+ *
+ * ASSUMES that the caller owns the PGM lock.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ */
+int pgmR3PhysRamZeroAll(PVM pVM)
+{
+ PGM_LOCK_ASSERT_OWNER(pVM);
+
/*
* We batch up pages that should be freed instead of calling GMM for
* each and every one of them.
*/
uint32_t cPendingPages = 0;
PGMMFREEPAGESREQ pReq;
- rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
+ int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
AssertLogRelRCReturn(rc, rc);
/*
@@ -1956,6 +1980,7 @@ int pgmR3PhysRamReset(PVM pVM)
break;
case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
true /*fDoAccounting*/);
break;
@@ -2009,6 +2034,7 @@ int pgmR3PhysRamReset(PVM pVM)
break;
case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
true /*fDoAccounting*/);
break;
@@ -2036,7 +2062,6 @@ int pgmR3PhysRamReset(PVM pVM)
AssertLogRelRCReturn(rc, rc);
}
GMMR3FreePagesCleanup(pReq);
-
return VINF_SUCCESS;
}
@@ -2105,6 +2130,7 @@ int pgmR3PhysRamTerm(PVM pVM)
break;
case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
case PGMPAGETYPE_MMIO2:
case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
case PGMPAGETYPE_ROM:
@@ -2128,6 +2154,7 @@ int pgmR3PhysRamTerm(PVM pVM)
return VINF_SUCCESS;
}
+
/**
* This is the interface IOM is using to register an MMIO region.
*
@@ -2341,15 +2368,16 @@ VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
while (cLeft-- > 0)
{
PPGMPAGE pPage = &pRam->aPages[iPage];
- if ( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
+ if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
/*|| not-out-of-action later */)
{
fAllMMIO = false;
- Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
break;
}
- Assert(PGM_PAGE_IS_ZERO(pPage));
+ Assert( PGM_PAGE_IS_ZERO(pPage)
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
pPage++;
}
if (fAllMMIO)
@@ -2387,9 +2415,11 @@ VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
while (cLeft--)
{
PPGMPAGE pPage = &pRam->aPages[iPage];
- AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
- AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
- if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
+ AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
+ ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
+ if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
}
break;
@@ -2466,7 +2496,8 @@ DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint3
* the memory.
* @param pszDesc The description.
*/
-VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
+VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
+ void **ppv, const char *pszDesc)
{
/*
* Validate input.
@@ -2484,7 +2515,7 @@ VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iReg
const uint32_t cPages = cb >> PAGE_SHIFT;
AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
- AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
+ AssertLogRelReturn(cPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_NO_MEMORY);
/*
* For the 2nd+ instance, mangle the description string so it's unique.
@@ -2497,6 +2528,20 @@ VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iReg
}
/*
+ * Allocate an MMIO2 range ID (not freed on failure).
+ * The zero ID is not used as it could be confused with NIL_GMM_PAGEID.
+ */
+ pgmLock(pVM);
+ uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
+ if (idMmio2 > PGM_MMIO2_MAX_RANGES)
+ {
+ pgmUnlock(pVM);
+ AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
+ }
+ pVM->pgm.s.cMmio2Regions = idMmio2;
+ pgmUnlock(pVM);
+
+ /*
* Try reserve and allocate the backing memory first as this is what is
* most likely to fail.
*/
@@ -2527,6 +2572,7 @@ VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iReg
//pNew->fOverlapping = false;
pNew->iRegion = iRegion;
pNew->idSavedState = UINT8_MAX;
+ pNew->idMmio2 = idMmio2;
pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
pNew->RamRange.pSelfRC = MMHyperCCToRC(pVM, &pNew->RamRange);
pNew->RamRange.GCPhys = NIL_RTGCPHYS;
@@ -2541,7 +2587,8 @@ VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iReg
while (iPage-- > 0)
{
PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
- paPages[iPage].Phys, NIL_GMM_PAGEID,
+ paPages[iPage].Phys,
+ PGM_MMIO2_PAGEID_MAKE(idMmio2, iPage),
PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
}
@@ -2553,9 +2600,14 @@ VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iReg
* Link it into the list.
* Since there is no particular order, just push it.
*/
+ /** @todo we can save us the linked list now, just search the lookup table... */
pgmLock(pVM);
+ Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
+ Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
pVM->pgm.s.pMmio2RangesR3 = pNew;
+ pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
+ pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = MMHyperCCToR0(pVM, pNew);
pgmUnlock(pVM);
*ppv = pvPages;
@@ -2629,6 +2681,11 @@ VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iR
pVM->pgm.s.pMmio2RangesR3 = pNext;
pCur->pNextR3 = NULL;
+ uint8_t idMmio2 = pCur->idMmio2;
+ Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
+ pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
+ pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
+
/*
* Free the memory.
*/
@@ -3545,8 +3602,8 @@ static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void
/**
- * Called by PGMR3Reset to reset the shadow, switch to the virgin,
- * and verify that the virgin part is untouched.
+ * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
+ * that the virgin part is untouched.
*
* This is done after the normal memory has been cleared.
*
@@ -3635,8 +3692,17 @@ int pgmR3PhysRomReset(PVM pVM)
break;
if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE)))
+ {
+# ifdef DEBUG_bird /* This is darn handy for EFI debugging w/ snapshots, should be made default later. */
+ void *pvDstPageW;
+ rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPageW);
+ AssertRCReturn(rc, rc);
+ memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE));
+# else
LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
GCPhys, pRom->pszDesc));
+# endif
+ }
cbSrcLeft -= RT_MIN(cbSrcLeft, PAGE_SIZE);
}
}
@@ -3794,7 +3860,7 @@ VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
pgmR3RefreshShadowModeAfterA20Change(pVCpu);
- HWACCMFlushTLB(pVCpu);
+ HMFlushTLB(pVCpu);
#endif
STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
}
@@ -4325,7 +4391,7 @@ VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
* @param pVM Pointer to the VM.
*
* @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
- * in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
+ * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
* pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
* handler.
*/
diff --git a/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h b/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h
index 4a9388e3..790e9f0d 100644
--- a/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h
+++ b/src/VBox/VMM/VMMR3/PGMPhysRWTmpl.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
diff --git a/src/VBox/VMM/VMMR3/PGMPool.cpp b/src/VBox/VMM/VMMR3/PGMPool.cpp
index 2744dd7f..97a8baba 100644
--- a/src/VBox/VMM/VMMR3/PGMPool.cpp
+++ b/src/VBox/VMM/VMMR3/PGMPool.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -100,6 +100,7 @@
#include <VBox/vmm/mm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include "PGMInline.h"
#include <VBox/log.h>
@@ -114,7 +115,7 @@
*******************************************************************************/
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#ifdef VBOX_WITH_DEBUGGER
-static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD pgmR3PoolCmdCheck;
#endif
#ifdef VBOX_WITH_DEBUGGER
@@ -282,53 +283,32 @@ int pgmR3PoolInit(PVM pVM)
pPool->pszAccessHandler = "Guest Paging Access Handler";
pPool->HCPhysTree = 0;
- /* The NIL entry. */
+ /*
+ * The NIL entry.
+ */
Assert(NIL_PGMPOOL_IDX == 0);
pPool->aPages[NIL_PGMPOOL_IDX].enmKind = PGMPOOLKIND_INVALID;
pPool->aPages[NIL_PGMPOOL_IDX].idx = NIL_PGMPOOL_IDX;
-
- /* The Shadow 32-bit PD. (32 bits guest paging) */
- pPool->aPages[PGMPOOL_IDX_PD].enmKind = PGMPOOLKIND_32BIT_PD;
- pPool->aPages[PGMPOOL_IDX_PD].idx = PGMPOOL_IDX_PD;
-
- /* The Shadow PDPT. */
- pPool->aPages[PGMPOOL_IDX_PDPT].enmKind = PGMPOOLKIND_PAE_PDPT;
- pPool->aPages[PGMPOOL_IDX_PDPT].idx = PGMPOOL_IDX_PDPT;
-
- /* The Shadow AMD64 CR3. */
- pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind = PGMPOOLKIND_64BIT_PML4;
- pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx = PGMPOOL_IDX_AMD64_CR3;
-
- /* The Nested Paging CR3. */
- pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].enmKind = PGMPOOLKIND_ROOT_NESTED;
- pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].idx = PGMPOOL_IDX_NESTED_ROOT;
-
- /*
- * Set common stuff.
- */
- for (unsigned iPage = 0; iPage < PGMPOOL_IDX_FIRST; iPage++)
- {
- pPool->aPages[iPage].Core.Key = NIL_RTHCPHYS;
- pPool->aPages[iPage].GCPhys = NIL_RTGCPHYS;
- pPool->aPages[iPage].iNext = NIL_PGMPOOL_IDX;
- /* pPool->aPages[iPage].cLocked = INT32_MAX; - test this out... */
- pPool->aPages[iPage].pvPageR3 = 0;
- pPool->aPages[iPage].iUserHead = NIL_PGMPOOL_USER_INDEX;
- pPool->aPages[iPage].iModifiedNext = NIL_PGMPOOL_IDX;
- pPool->aPages[iPage].iModifiedPrev = NIL_PGMPOOL_IDX;
- pPool->aPages[iPage].iMonitoredNext = NIL_PGMPOOL_IDX;
- pPool->aPages[iPage].iMonitoredNext = NIL_PGMPOOL_IDX;
- pPool->aPages[iPage].iAgeNext = NIL_PGMPOOL_IDX;
- pPool->aPages[iPage].iAgePrev = NIL_PGMPOOL_IDX;
-
- Assert(pPool->aPages[iPage].idx == iPage);
- Assert(pPool->aPages[iPage].GCPhys == NIL_RTGCPHYS);
- Assert(!pPool->aPages[iPage].fSeenNonGlobal);
- Assert(!pPool->aPages[iPage].fMonitored);
- Assert(!pPool->aPages[iPage].fCached);
- Assert(!pPool->aPages[iPage].fZeroed);
- Assert(!pPool->aPages[iPage].fReusedFlushPending);
- }
+ pPool->aPages[NIL_PGMPOOL_IDX].Core.Key = NIL_RTHCPHYS;
+ pPool->aPages[NIL_PGMPOOL_IDX].GCPhys = NIL_RTGCPHYS;
+ pPool->aPages[NIL_PGMPOOL_IDX].iNext = NIL_PGMPOOL_IDX;
+ /* pPool->aPages[NIL_PGMPOOL_IDX].cLocked = INT32_MAX; - test this out... */
+ pPool->aPages[NIL_PGMPOOL_IDX].pvPageR3 = 0;
+ pPool->aPages[NIL_PGMPOOL_IDX].iUserHead = NIL_PGMPOOL_USER_INDEX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iModifiedNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iModifiedPrev = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iAgeNext = NIL_PGMPOOL_IDX;
+ pPool->aPages[NIL_PGMPOOL_IDX].iAgePrev = NIL_PGMPOOL_IDX;
+
+ Assert(pPool->aPages[NIL_PGMPOOL_IDX].idx == NIL_PGMPOOL_IDX);
+ Assert(pPool->aPages[NIL_PGMPOOL_IDX].GCPhys == NIL_RTGCPHYS);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fSeenNonGlobal);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fMonitored);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fCached);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fZeroed);
+ Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fReusedFlushPending);
#ifdef VBOX_WITH_STATISTICS
/*
@@ -430,12 +410,17 @@ void pgmR3PoolRelocate(PVM pVM)
pVM->pgm.s.pPoolR3->pVMRC = pVM->pVMRC;
pVM->pgm.s.pPoolR3->paUsersRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paUsersR3);
pVM->pgm.s.pPoolR3->paPhysExtsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paPhysExtsR3);
- int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
- AssertReleaseRC(rc);
+
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
+ AssertReleaseRC(rc);
+ }
+
/* init order hack. */
if (!pVM->pgm.s.pPoolR3->pfnAccessHandlerR0)
{
- rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
+ int rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
AssertReleaseRC(rc);
}
}
@@ -458,8 +443,8 @@ VMMR3DECL(int) PGMR3PoolGrow(PVM pVM)
(below 4 GB) memory. */
/** @todo change the pool to handle ROOT page allocations specially when
* required. */
- bool fCanUseHighMemory = HWACCMIsNestedPagingActive(pVM)
- && HWACCMGetShwPagingMode(pVM) == PGMMODE_EPT;
+ bool fCanUseHighMemory = HMIsNestedPagingActive(pVM)
+ && HMGetShwPagingMode(pVM) == PGMMODE_EPT;
pgmLock(pVM);
@@ -784,21 +769,6 @@ DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, vo
}
}
- /* swipe the special pages too. */
- for (iPage = PGMPOOL_IDX_FIRST_SPECIAL; iPage < PGMPOOL_IDX_FIRST; iPage++)
- {
- PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
- if (pPage->GCPhys != NIL_RTGCPHYS)
- {
- Assert(!pPage->cModifications || ++cModifiedPages);
- Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
- Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
- pPage->iModifiedNext = NIL_PGMPOOL_IDX;
- pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
- pPage->cModifications = 0;
- }
- }
-
#ifndef DEBUG_michael
AssertMsg(cModifiedPages == pPool->cModifiedPages, ("%d != %d\n", cModifiedPages, pPool->cModifiedPages));
#endif
@@ -977,18 +947,13 @@ void pgmR3PoolWriteProtectPages(PVM pVM)
#ifdef VBOX_WITH_DEBUGGER
/**
- * The '.pgmpoolcheck' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmpoolcheck' command.}
*/
-static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs == 0);
uint32_t cErrors = 0;
NOREF(paArgs);
diff --git a/src/VBox/VMM/VMMR3/PGMSavedState.cpp b/src/VBox/VMM/VMMR3/PGMSavedState.cpp
index 19a84337..fbd01bb7 100644
--- a/src/VBox/VMM/VMMR3/PGMSavedState.cpp
+++ b/src/VBox/VMM/VMMR3/PGMSavedState.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2009 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -104,6 +104,27 @@
#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
+
+/** @name Old Page types used in older saved states.
+ * @{ */
+/** Old saved state: The usual invalid zero entry. */
+#define PGMPAGETYPE_OLD_INVALID 0
+/** Old saved state: RAM page. (RWX) */
+#define PGMPAGETYPE_OLD_RAM 1
+/** Old saved state: MMIO2 page. (RWX) */
+#define PGMPAGETYPE_OLD_MMIO2 1
+/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
+ * See PGMHandlerPhysicalPageAlias(). */
+#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 2
+/** Old saved state: Shadowed ROM. (RWX) */
+#define PGMPAGETYPE_OLD_ROM_SHADOW 3
+/** Old saved state: ROM page. (R-X) */
+#define PGMPAGETYPE_OLD_ROM 4
+/** Old saved state: MMIO page. (---) */
+#define PGMPAGETYPE_OLD_MMIO 5
+/** @} */
+
+
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
@@ -1155,6 +1176,7 @@ static int pgmR3PrepRamPages(PVM pVM)
break;
case PGMPAGETYPE_MMIO:
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
paLSPages[iPage].fZero = 0;
paLSPages[iPage].fShared = 0;
paLSPages[iPage].fDirty = 0;
@@ -2171,21 +2193,50 @@ static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
/**
+ * Compares a page with an old save type value.
+ *
+ * @returns true if equal, false if not.
+ * @param pPage The page to compare.
+ * @param uOldType The old type value from the saved state.
+ */
+DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
+{
+ uint8_t uOldPageType;
+ switch (PGM_PAGE_GET_TYPE(pPage))
+ {
+ case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
+ case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
+ case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
+ case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
+ case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
+ case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
+ case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /* fall thru */
+ case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
+ default:
+ AssertFailed();
+ uOldPageType = PGMPAGETYPE_OLD_INVALID;
+ break;
+ }
+ return uOldPageType == uOldType;
+}
+
+
+/**
* Loads a page without any bits in the saved state, i.e. making sure it's
* really zero.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
- * @param uType The page type or PGMPAGETYPE_INVALID (old saved
+ * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
* state).
* @param pPage The guest page tracking structure.
* @param GCPhys The page address.
* @param pRam The ram range (logging).
*/
-static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
- if ( PGM_PAGE_GET_TYPE(pPage) != uType
- && uType != PGMPAGETYPE_INVALID)
+ if ( uOldType != PGMPAGETYPE_OLD_INVALID
+ && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
return VERR_SSM_UNEXPECTED_DATA;
/* I think this should be sufficient. */
@@ -2206,21 +2257,21 @@ static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pSSM The SSM handle.
- * @param uType The page type or PGMPAGETYEP_INVALID (old saved
+ * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
* state).
* @param pPage The guest page tracking structure.
* @param GCPhys The page address.
* @param pRam The ram range (logging).
*/
-static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
/*
* Match up the type, dealing with MMIO2 aliases (dropped).
*/
- AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
- || uType == PGMPAGETYPE_INVALID
+ AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
+ || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
/* kudge for the expanded PXE bios (r67885) - @bugref{5687}: */
- || ( uType == PGMPAGETYPE_RAM
+ || ( uOldType == PGMPAGETYPE_OLD_RAM
&& GCPhys >= 0xed000
&& GCPhys <= 0xeffff
&& PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
@@ -2250,24 +2301,24 @@ static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAG
* @returns VBox status code, fully bitched errors.
* @param pVM Pointer to the VM.
* @param pSSM The SSM handle.
- * @param uType The page type.
+ * @param uOldType The page type.
* @param pPage The page.
* @param GCPhys The page address.
* @param pRam The RAM range (for error messages).
*/
-static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
+static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
uint8_t uState;
int rc = SSMR3GetU8(pSSM, &uState);
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
if (uState == 0 /* zero */)
- rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
+ rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
else if (uState == 1)
- rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
+ rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
else
rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
- AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
- pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
+ AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
+ pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
rc);
return VINF_SUCCESS;
}
@@ -2439,13 +2490,13 @@ static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
{
RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
PPGMPAGE pPage = &pRam->aPages[iPage];
- uint8_t uType;
- rc = SSMR3GetU8(pSSM, &uType);
+ uint8_t uOldType;
+ rc = SSMR3GetU8(pSSM, &uOldType);
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
- if (uType == PGMPAGETYPE_ROM_SHADOW)
+ if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
else
- rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
+ rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
}
}
@@ -2493,7 +2544,8 @@ static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
PPGMPAGE pPage = &pRam->aPages[iPage];
if (fPresent)
{
- if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
+ if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
+ || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
rc = pgmR3LoadPageToDevNullOld(pSSM);
else
rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
@@ -3214,6 +3266,7 @@ static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
if ( pVM->pgm.s.fMappingsFixed
&& pgmMapAreMappingsEnabled(pVM))
{
+#ifndef PGM_WITHOUT_MAPPINGS
RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;
uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;
pVM->pgm.s.fMappingsFixed = false;
@@ -3234,6 +3287,9 @@ static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;
pVM->pgm.s.cbMappingFixed = cbFixed;
}
+#else
+ AssertFailed();
+#endif
}
else
{
diff --git a/src/VBox/VMM/VMMR3/PGMSharedPage.cpp b/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
index bae1afb4..b37c4453 100644
--- a/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
+++ b/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -22,6 +22,7 @@
#define LOG_GROUP LOG_GROUP_PGM_SHARED
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
+#include <VBox/vmm/uvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/sup.h>
@@ -318,16 +319,9 @@ VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *p
# ifdef VBOX_STRICT
/**
- * The '.pgmcheckduppages' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmcheckduppages' command.}
*/
-DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
unsigned cBallooned = 0;
unsigned cShared = 0;
@@ -337,6 +331,8 @@ DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHl
unsigned cAllocZero = 0;
unsigned cPages = 0;
NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
pgmLock(pVM);
@@ -410,18 +406,13 @@ DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHl
/**
- * The '.pgmsharedmodules' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.pgmsharedmodules' command.}
*/
-DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
pgmLock(pVM);
for (unsigned i = 0; i < RT_ELEMENTS(g_apSharedModules); i++)
diff --git a/src/VBox/VMM/VMMR3/PGMShw.h b/src/VBox/VMM/VMMR3/PGMShw.h
index b4079612..6b2f6b9d 100644
--- a/src/VBox/VMM/VMMR3/PGMShw.h
+++ b/src/VBox/VMM/VMMR3/PGMShw.h
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -36,7 +36,6 @@
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK
-#undef SHW_POOL_ROOT_IDX
#if PGM_SHW_TYPE == PGM_TYPE_32BIT
# define SHWPT X86PT
@@ -54,7 +53,6 @@
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK
-# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD
#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWPT EPTPT
@@ -75,7 +73,6 @@
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES)
-# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. */
#else
# define SHWPT PGMSHWPTPAE
@@ -98,14 +95,12 @@
# define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
# define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES)
-# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3
# else /* 32 bits PAE mode */
# define SHW_PDPT_SHIFT X86_PDPT_SHIFT
# define SHW_PDPT_MASK X86_PDPT_MASK_PAE
# define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
-# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT
# endif
#endif
@@ -149,13 +144,16 @@ PGM_SHW_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR
{
int rc;
+ if (!HMIsEnabled(pVM))
+ {
#if PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
- /* GC */
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_SHW_NAME_RC_STR(GetPage), &pModeData->pfnRCShwGetPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(GetPage), rc), rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_SHW_NAME_RC_STR(ModifyPage), &pModeData->pfnRCShwModifyPage);
- AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(ModifyPage), rc), rc);
+ /* GC */
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_SHW_NAME_RC_STR(GetPage), &pModeData->pfnRCShwGetPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(GetPage), rc), rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_SHW_NAME_RC_STR(ModifyPage), &pModeData->pfnRCShwModifyPage);
+ AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(ModifyPage), rc), rc);
#endif /* Not AMD64 shadow paging. */
+ }
/* Ring-0 */
rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_SHW_NAME_R0_STR(GetPage), &pModeData->pfnR0ShwGetPage);
@@ -187,19 +185,17 @@ PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode)
PPGMPOOLPAGE pNewShwPageCR3;
PVM pVM = pVCpu->pVMR3;
- Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
+ Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
Assert(pVM->pgm.s.fNestedPaging);
Assert(!pVCpu->pgm.s.pShwPageCR3R3);
pgmLock(pVM);
int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
- PGMPOOL_IDX_NESTED_ROOT, GCPhysCR3 >> PAGE_SHIFT, true /*fLockPage*/,
+ NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
&pNewShwPageCR3);
AssertFatalRC(rc);
- pVCpu->pgm.s.iShwUser = PGMPOOL_IDX_NESTED_ROOT;
- pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
pVCpu->pgm.s.pShwPageCR3R3 = pNewShwPageCR3;
pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);
@@ -245,8 +241,6 @@ PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
{
PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
- Assert(pVCpu->pgm.s.iShwUser == PGMPOOL_IDX_NESTED_ROOT);
-
pgmLock(pVM);
/* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
@@ -256,12 +250,10 @@ PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
*/
/* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */
- pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
+ pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
pVCpu->pgm.s.pShwPageCR3R3 = 0;
pVCpu->pgm.s.pShwPageCR3R0 = 0;
pVCpu->pgm.s.pShwPageCR3RC = 0;
- pVCpu->pgm.s.iShwUser = 0;
- pVCpu->pgm.s.iShwUserTable = 0;
pgmUnlock(pVM);
diff --git a/src/VBox/VMM/VMMR3/SELM.cpp b/src/VBox/VMM/VMMR3/SELM.cpp
index 4e4149ac..1504a37f 100644
--- a/src/VBox/VMM/VMMR3/SELM.cpp
+++ b/src/VBox/VMM/VMMR3/SELM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -62,6 +62,8 @@
#include <VBox/vmm/selm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pgm.h>
@@ -79,15 +81,7 @@
#include <iprt/thread.h>
#include <iprt/string.h>
-
-/**
- * Enable or disable tracking of Shadow GDT/LDT/TSS.
- * @{
- */
-#define SELM_TRACK_SHADOW_GDT_CHANGES
-#define SELM_TRACK_SHADOW_LDT_CHANGES
-#define SELM_TRACK_SHADOW_TSS_CHANGES
-/** @} */
+#include "SELMInline.h"
/** SELM saved state version. */
@@ -128,6 +122,7 @@ static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "F
*/
VMMR3DECL(int) SELMR3Init(PVM pVM)
{
+ int rc;
LogFlow(("SELMR3Init\n"));
/*
@@ -154,18 +149,21 @@ VMMR3DECL(int) SELMR3Init(PVM pVM)
pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = (SELM_GDT_ELEMENTS - 0x4) << 3;
pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;
- /*
- * Allocate GDT table.
- */
- int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
+ if (HMIsRawModeCtxNeeded(pVM))
+ {
+ /*
+ * Allocate GDT table.
+ */
+ rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3);
- AssertRCReturn(rc, rc);
+ AssertRCReturn(rc, rc);
- /*
- * Allocate LDT area.
- */
- rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
- AssertRCReturn(rc, rc);
+ /*
+ * Allocate LDT area.
+ */
+ rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
+ AssertRCReturn(rc, rc);
+ }
/*
* Init Guest's and Shadow GDT, LDT, TSS changes control variables.
@@ -180,11 +178,11 @@ VMMR3DECL(int) SELMR3Init(PVM pVM)
pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
pVM->selm.s.GCSelTss = RTSEL_MAX;
- pVM->selm.s.fDisableMonitoring = false;
pVM->selm.s.fSyncTSSRing0Stack = false;
- /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
- * for I/O operations. */
+ /* The I/O bitmap starts right after the virtual interrupt redirection
+ bitmap. Outside the TSS on purpose; the CPU will not check it for
+ I/O operations. */
pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
/* bit set to 1 means no redirection */
memset(pVM->selm.s.Tss.IntRedirBitmap, 0xff, sizeof(pVM->selm.s.Tss.IntRedirBitmap));
@@ -202,63 +200,74 @@ VMMR3DECL(int) SELMR3Init(PVM pVM)
/*
* Statistics.
*/
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
- STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
- STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
- STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
-
- STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
- STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");
-
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");
-
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
- STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");
-
- STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg, STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");
-
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
- STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
+ if (!HMIsEnabled(pVM))
+ {
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
+ STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
+ STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
+ STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
+
+ STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
+ STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");
+
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");
+
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
+ STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");
+
+ STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg, STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");
+
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
+ STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
+ }
STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelGst, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables.");
STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelShw, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedShadow", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from shadow tables.");
STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelReadErrors, STAMTYPE_COUNTER, "/SELM/LoadHidSel/GstReadErrors", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Guest table read errors.");
STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelGstNoGood, STAMTYPE_COUNTER, "/SELM/LoadHidSel/NoGoodGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: No good guest table entry.");
+#ifdef VBOX_WITH_RAW_MODE
/*
* Default action when entering raw mode for the first time
*/
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+ if (!HMIsEnabled(pVM))
+ {
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+ }
+#endif
/*
* Register info handlers.
*/
- DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
+ if (HMIsRawModeCtxNeeded(pVM))
+ {
+ DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
+ DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
+ //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
+ }
DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
- DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
- //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
//DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);
return rc;
@@ -273,6 +282,7 @@ VMMR3DECL(int) SELMR3Init(PVM pVM)
*/
VMMR3DECL(int) SELMR3InitFinalize(PVM pVM)
{
+#ifdef VBOX_WITH_RAW_MODE
/** @cfgm{/DoubleFault,bool,false}
* Enables catching of double faults in the raw-mode context VMM code. This can
* be used when the triple faults or hangs occur and one suspect an unhandled
@@ -283,13 +293,13 @@ VMMR3DECL(int) SELMR3InitFinalize(PVM pVM)
* 8 TSS for the back link.
*/
bool f;
-#if defined(DEBUG_bird)
+# if defined(DEBUG_bird)
int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true);
-#else
+# else
int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false);
-#endif
+# endif
AssertLogRelRCReturn(rc, rc);
- if (f)
+ if (f && HMIsRawModeCtxNeeded(pVM))
{
PX86DESC paGdt = pVM->selm.s.paGdtR3;
rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
@@ -305,6 +315,7 @@ VMMR3DECL(int) SELMR3InitFinalize(PVM pVM)
X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
AssertRC(rc);
}
+#endif /* VBOX_WITH_RAW_MODE */
return VINF_SUCCESS;
}
@@ -420,72 +431,74 @@ VMMR3DECL(void) SELMR3Relocate(PVM pVM)
PX86DESC paGdt = pVM->selm.s.paGdtR3;
LogFlow(("SELMR3Relocate\n"));
- for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ if (HMIsRawModeCtxNeeded(pVM))
{
- PVMCPU pVCpu = &pVM->aCpus[i];
-
- /*
- * Update GDTR and selector.
- */
- CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
+ for (VMCPUID i = 0; i < pVM->cCpus; i++)
+ {
+ PVMCPU pVCpu = &pVM->aCpus[i];
- /** @todo selector relocations should be a separate operation? */
- CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
- CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
- CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
- CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
- CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
- }
+ /*
+ * Update GDTR and selector.
+ */
+ CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
+
+ /** @todo selector relocations should be a separate operation? */
+ CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
+ CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
+ CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
+ CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
+ CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
+ }
- selmR3SetupHyperGDTSelectors(pVM);
+ selmR3SetupHyperGDTSelectors(pVM);
/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
/** @todo PGM knows the proper CR3 values these days, not CPUM. */
- /*
- * Update the TSSes.
- */
- /* Only applies to raw mode which supports only 1 VCPU */
- PVMCPU pVCpu = &pVM->aCpus[0];
-
- /* Current TSS */
- pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
- pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.Tss.esp0 = VMMGetStackRC(pVCpu);
- pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
- pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
-
- /* trap 08 */
- pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give use better survival chances. */
- pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
- pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
- pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
- pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
- pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
- pVM->selm.s.TssTrap08.fs = 0;
- pVM->selm.s.TssTrap08.gs = 0;
- pVM->selm.s.TssTrap08.selLdt = 0;
- pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
- pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
- pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
- pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
- pVM->selm.s.TssTrap08.edx = VM_RC_ADDR(pVM, pVM); /* setup edx VM address. */
- pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
- pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
- pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
- /* TRPM will be updating the eip */
-
- if ( !pVM->selm.s.fDisableMonitoring
- && !VMMIsHwVirtExtForced(pVM))
+ /*
+ * Update the TSSes.
+ */
+ /* Only applies to raw mode which supports only 1 VCPU */
+ PVMCPU pVCpu = &pVM->aCpus[0];
+
+ /* Current TSS */
+ pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
+ pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.Tss.esp0 = VMMGetStackRC(pVCpu);
+ pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
+ pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
+
+ /* trap 08 */
+ pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give use better survival chances. */
+ pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
+ pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
+ pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
+ pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
+ pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
+ pVM->selm.s.TssTrap08.fs = 0;
+ pVM->selm.s.TssTrap08.gs = 0;
+ pVM->selm.s.TssTrap08.selLdt = 0;
+ pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
+ pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
+ pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
+ pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
+ pVM->selm.s.TssTrap08.edx = VM_RC_ADDR(pVM, pVM); /* setup edx VM address. */
+ pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
+ pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
+ pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
+ /* TRPM will be updating the eip */
+ }
+
+ if (!HMIsEnabled(pVM))
{
/*
* Update shadow GDT/LDT/TSS write access handlers.
*/
- int rc;
+ int rc; NOREF(rc);
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
{
@@ -544,7 +557,7 @@ VMMR3DECL(void) SELMR3Relocate(PVM pVM)
VMMR3DECL(int) SELMR3Term(PVM pVM)
{
NOREF(pVM);
- return 0;
+ return VINF_SUCCESS;
}
@@ -564,25 +577,31 @@ VMMR3DECL(void) SELMR3Reset(PVM pVM)
/*
* Uninstall guest GDT/LDT/TSS write access handlers.
*/
- int rc;
+ int rc = VINF_SUCCESS;
if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
{
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
AssertRC(rc);
+#endif
pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
pVM->selm.s.GuestGdtr.cbGdt = 0;
}
pVM->selm.s.fGDTRangeRegistered = false;
if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
{
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
AssertRC(rc);
+#endif
pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
}
if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
{
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
AssertRC(rc);
+#endif
pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
pVM->selm.s.GCSelTss = RTSEL_MAX;
}
@@ -596,82 +615,18 @@ VMMR3DECL(void) SELMR3Reset(PVM pVM)
pVM->selm.s.fSyncTSSRing0Stack = false;
- /*
- * Default action when entering raw mode for the first time
- */
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-}
-
-/**
- * Disable GDT/LDT/TSS monitoring and syncing
- *
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
-{
- /*
- * Uninstall guest GDT/LDT/TSS write access handlers.
- */
- int rc;
- if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
- AssertRC(rc);
- pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
- pVM->selm.s.GuestGdtr.cbGdt = 0;
- }
- pVM->selm.s.fGDTRangeRegistered = false;
- if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
- AssertRC(rc);
- pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
- }
- if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
- AssertRC(rc);
- pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
- pVM->selm.s.GCSelTss = RTSEL_MAX;
- }
-
- /*
- * Unregister shadow GDT/LDT/TSS write access handlers.
- */
-#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
- if (pVM->selm.s.paGdtRC != NIL_RTRCPTR)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC);
- AssertRC(rc);
- pVM->selm.s.paGdtRC = NIL_RTRCPTR;
- }
-#endif
-#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
- if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX)
+#ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
{
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC);
- AssertRC(rc);
- pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX;
- }
-#endif
-#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
- if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC);
- AssertRC(rc);
- pVM->selm.s.pvLdtRC = RTRCPTR_MAX;
+ /*
+ * Default action when entering raw mode for the first time
+ */
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
}
#endif
-
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-
- pVM->selm.s.fDisableMonitoring = true;
}
@@ -691,7 +646,7 @@ static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
*/
PSELM pSelm = &pVM->selm.s;
- SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
+ SSMR3PutBool(pSSM, HMIsEnabled(pVM));
SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS]);
SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]);
@@ -731,7 +686,8 @@ static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
SELMR3Reset(pVM);
/* Get the monitoring flag. */
- SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);
+ bool fIgnored;
+ SSMR3GetBool(pSSM, &fIgnored);
/* Get the TSS state flag. */
SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);
@@ -776,38 +732,43 @@ static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
*/
static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
{
- PVMCPU pVCpu = VMMGetCpu(pVM);
+#ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
+ {
+ PVMCPU pVCpu = VMMGetCpu(pVM);
- LogFlow(("selmR3LoadDone:\n"));
+ LogFlow(("selmR3LoadDone:\n"));
- /*
- * Don't do anything if it's a load failure.
- */
- int rc = SSMR3HandleGetStatus(pSSM);
- if (RT_FAILURE(rc))
- return VINF_SUCCESS;
+ /*
+ * Don't do anything if it's a load failure.
+ */
+ int rc = SSMR3HandleGetStatus(pSSM);
+ if (RT_FAILURE(rc))
+ return VINF_SUCCESS;
- /*
- * Do the syncing if we're in protected mode.
- */
- if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
- {
+ /*
+ * Do the syncing if we're in protected mode and using raw-mode.
+ */
+ if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
+ {
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+ SELMR3UpdateFromCPUM(pVM, pVCpu);
+ }
+
+ /*
+ * Flag everything for resync on next raw mode entry.
+ */
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- SELMR3UpdateFromCPUM(pVM, pVCpu);
}
-
- /*
- * Flag everything for resync on next raw mode entry.
- */
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
- VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-
+#endif /*VBOX_WITH_RAW_MODE*/
return VINF_SUCCESS;
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Updates (syncs) the shadow GDT.
@@ -818,6 +779,8 @@ static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
*/
static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
{
+ Assert(!HMIsEnabled(pVM));
+
/*
* Always assume the best...
*/
@@ -921,7 +884,7 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
Log(("Internal SELM GDT conflict: use non-present entries\n"));
STAM_REL_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
- while (pGDTECur > pGDTEStart)
+ while ((uintptr_t)pGDTECur > (uintptr_t)pGDTEStart)
{
/* We can reuse non-present entries */
if (!pGDTECur->Gen.u1Present)
@@ -951,6 +914,15 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
}
+# ifdef VBOX_WITH_SAFE_STR
+ /* Use the guest's TR selector to plug the str virtualization hole. */
+ if (CPUMGetGuestTR(pVCpu, NULL) != 0)
+ {
+ Log(("SELM: Use guest TSS selector %x\n", CPUMGetGuestTR(pVCpu, NULL)));
+ aHyperSel[SELM_HYPER_SEL_TSS] = CPUMGetGuestTR(pVCpu, NULL);
+ }
+# endif
+
/*
* Work thru the copied GDT entries adjusting them for correct virtualization.
*/
@@ -958,7 +930,7 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
while (pGDTE < pGDTEEnd)
{
if (pGDTE->Gen.u1Present)
- selmGuestToShadowDesc(pGDTE);
+ selmGuestToShadowDesc(pVM, pGDTE);
/* Next GDT entry. */
pGDTE++;
@@ -988,7 +960,12 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
*/
VMR3Relocate(pVM, 0);
}
+# ifdef VBOX_WITH_SAFE_STR
+ else if ( cbEffLimit >= SELM_HYPER_DEFAULT_BASE
+ || CPUMGetGuestTR(pVCpu, NULL) != 0) /* Our shadow TR entry was overwritten when we synced the guest's GDT. */
+# else
else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
+# endif
/* We overwrote all entries above, so we have to save them again. */
selmR3SetupHyperGDTSelectors(pVM);
@@ -1010,6 +987,7 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
{
Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
+# ifdef SELM_TRACK_GUEST_GDT_CHANGES
/*
* [Re]Register write virtual handler for guest's GDT.
*/
@@ -1023,8 +1001,28 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
"Guest GDT write access handler");
+# ifdef VBOX_WITH_RAW_RING1
+ /** @todo !HACK ALERT!
+ * Some guest OSes (QNX) share code and the GDT on the same page;
+ * PGMR3HandlerVirtualRegister doesn't support more than one handler,
+ * so we kick out the PATM handler as this one is more important.
+ * Fix this properly in PGMR3HandlerVirtualRegister?
+ */
+ if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+ {
+ LogRel(("selmR3UpdateShadowGdt: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority GDT page monitor\n", GDTR.pGdt));
+ rc = PGMHandlerVirtualDeregister(pVM, GDTR.pGdt & PAGE_BASE_GC_MASK);
+ AssertRC(rc);
+
+ rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
+ GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
+ 0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
+ "Guest GDT write access handler");
+ }
+# endif
if (RT_FAILURE(rc))
return rc;
+# endif /* SELM_TRACK_GUEST_GDT_CHANGES */
/* Update saved Guest GDTR. */
pVM->selm.s.GuestGdtr = GDTR;
@@ -1045,6 +1043,7 @@ static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
{
int rc = VINF_SUCCESS;
+ Assert(!HMIsEnabled(pVM));
/*
* Always assume the best...
@@ -1135,6 +1134,7 @@ static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %RGv:%04x to %RGv:%04x. (GDTR=%016RX64:%04x)\n",
pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
+# ifdef SELM_TRACK_GUEST_LDT_CHANGES
/*
* [Re]Register write virtual handler for guest's GDT.
* In the event of LDT overlapping something, don't install it just assume it's being updated.
@@ -1144,10 +1144,10 @@ static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
AssertRC(rc);
}
-#ifdef DEBUG
+# ifdef LOG_ENABLED
if (pDesc->Gen.u1Present)
Log(("LDT selector marked not present!!\n"));
-#endif
+# endif
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
@@ -1164,7 +1164,9 @@ static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
CPUMSetHyperLDTR(pVCpu, 0);
return rc;
}
-
+# else
+ pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
+# endif
pVM->selm.s.cbLdtLimit = cbLdt;
}
}
@@ -1237,7 +1239,7 @@ static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
while (pLDTE <= pLDTEEnd)
{
if (pLDTE->Gen.u1Present)
- selmGuestToShadowDesc(pLDTE);
+ selmGuestToShadowDesc(pVM, pLDTE);
/* Next LDT entry. */
pLDTE++;
@@ -1276,6 +1278,7 @@ static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
static VBOXSTRICTRC selmR3UpdateSegmentRegisters(PVM pVM, PVMCPU pVCpu)
{
Assert(CPUMIsGuestInProtectedMode(pVCpu));
+ Assert(!HMIsEnabled(pVM));
/*
* No stale selectors in V8086 mode.
@@ -1377,21 +1380,14 @@ static VBOXSTRICTRC selmR3UpdateSegmentRegisters(PVM pVM, PVMCPU pVCpu)
*/
VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
{
- if (pVM->selm.s.fDisableMonitoring)
- {
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- return VINF_SUCCESS;
- }
-
STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
+ AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
/*
* GDT sync
*/
int rc;
- if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
{
rc = selmR3UpdateShadowGdt(pVM, pVCpu);
if (RT_FAILURE(rc))
@@ -1402,7 +1398,7 @@ VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
/*
* TSS sync
*/
- if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
{
rc = SELMR3SyncTSS(pVM, pVCpu);
if (RT_FAILURE(rc))
@@ -1413,7 +1409,7 @@ VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
/*
* LDT sync
*/
- if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
{
rc = selmR3UpdateShadowLdt(pVM, pVCpu);
if (RT_FAILURE(rc))
@@ -1430,7 +1426,9 @@ VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
return rcStrict;
}
+#endif /*VBOX_WITH_RAW_MODE*/
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
* \#PF Handler callback for virtual access handler ranges.
*
@@ -1457,8 +1455,9 @@ static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void
VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_GDT);
return VINF_PGM_HANDLER_DO_DEFAULT;
}
+#endif
-
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
* \#PF Handler callback for virtual access handler ranges.
*
@@ -1485,8 +1484,10 @@ static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void
VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_LDT);
return VINF_PGM_HANDLER_DO_DEFAULT;
}
+#endif
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
* \#PF Handler callback for virtual access handler ranges.
*
@@ -1518,7 +1519,9 @@ static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void
VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_TSS);
return VINF_PGM_HANDLER_DO_DEFAULT;
}
+#endif
+#ifdef VBOX_WITH_RAW_MODE
/**
* Synchronize the shadowed fields in the TSS.
@@ -1534,16 +1537,11 @@ static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void
*/
VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
{
- int rc;
-
- if (pVM->selm.s.fDisableMonitoring)
- {
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
- return VINF_SUCCESS;
- }
+ int rc;
+ AssertReturnStmt(!HMIsEnabled(pVM), VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS), VINF_SUCCESS);
STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
- Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
+ Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
/*
* Get TR and extract and store the basic info.
@@ -1641,7 +1639,7 @@ VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
*/
if (RT_SUCCESS(rc))
{
-#ifdef LOG_ENABLED
+# ifdef LOG_ENABLED
if (LogIsEnabled())
{
uint32_t ssr0, espr0;
@@ -1658,12 +1656,23 @@ VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
}
Log(("offIoBitmap=%#x\n", Tss.offIoBitmap));
}
-#endif /* LOG_ENABLED */
+# endif /* LOG_ENABLED */
AssertMsg(!(Tss.ss0 & 3), ("ring-1 leak into TSS.SS0? %04X:%08X\n", Tss.ss0, Tss.esp0));
/* Update our TSS structure for the guest's ring 1 stack */
selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0);
pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false;
+
+# ifdef VBOX_WITH_RAW_RING1
+ /* Update our TSS structure for the guest's ring 2 stack */
+ if (EMIsRawRing1Enabled(pVM))
+ {
+ if ( (pVM->selm.s.Tss.ss2 != ((Tss.ss1 & ~2) | 1))
+ || pVM->selm.s.Tss.esp2 != Tss.esp1)
+ Log(("SELMR3SyncTSS: Updating TSS ring 1 stack to %04X:%08X from %04X:%08X\n", Tss.ss1, Tss.esp1, (pVM->selm.s.Tss.ss2 & ~2) | 1, pVM->selm.s.Tss.esp2));
+ selmSetRing2Stack(pVM, (Tss.ss1 & ~1) | 2, Tss.esp1);
+ }
+# endif
}
}
@@ -1700,14 +1709,40 @@ VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
/* Register the write handler if TS != 0. */
if (cbMonitoredTss != 0)
{
+# ifdef SELM_TRACK_GUEST_TSS_CHANGES
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
0, selmR3GuestTSSWriteHandler,
"selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
if (RT_FAILURE(rc))
{
+# ifdef VBOX_WITH_RAW_RING1
+ /** @todo !HACK ALERT!
+ * Some guest OSes (QNX) share code and the TSS on the same page;
+ * PGMR3HandlerVirtualRegister doesn't support more than one
+ * handler, so we kick out the PATM handler as this one is more
+ * important. Fix this properly in PGMR3HandlerVirtualRegister?
+ */
+ if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+ {
+ LogRel(("SELMR3SyncTSS: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority TSS page monitor\n", GCPtrTss));
+ rc = PGMHandlerVirtualDeregister(pVM, GCPtrTss & PAGE_BASE_GC_MASK);
+ AssertRC(rc);
+
+ rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
+ 0, selmR3GuestTSSWriteHandler,
+ "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
+ if (RT_FAILURE(rc))
+ {
+ STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
+ return rc;
+ }
+ }
+# else
STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
return rc;
- }
+# endif
+ }
+# endif /* SELM_TRACK_GUEST_TSS_CHANGES */
/* Update saved Guest TSS info. */
pVM->selm.s.GCPtrGuestTss = GCPtrTss;
@@ -1740,6 +1775,7 @@ VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
{
#ifdef VBOX_STRICT
PVMCPU pVCpu = VMMGetCpu(pVM);
+ AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
/*
* Get GDTR and check for conflict.
@@ -1875,10 +1911,10 @@ VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
*/
VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
{
-#ifdef VBOX_STRICT
+#if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
PVMCPU pVCpu = VMMGetCpu(pVM);
- if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
return true;
/*
@@ -2008,6 +2044,50 @@ VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
}
+# ifdef VBOX_WITH_SAFE_STR
+/**
+ * Validates the RawR0 TR shadow GDT entry.
+ *
+ * @returns true if it matches.
+ * @returns false and assertions on mismatch.
+ * @param pVM Pointer to the VM.
+ */
+VMMR3DECL(bool) SELMR3CheckShadowTR(PVM pVM)
+{
+# ifdef VBOX_STRICT
+ PX86DESC paGdt = pVM->selm.s.paGdtR3;
+
+ /*
+ * TSS descriptor
+ */
+ PX86DESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
+ RTRCPTR RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
+
+ if ( pDesc->Gen.u16BaseLow != RT_LOWORD(RCPtrTSS)
+ || pDesc->Gen.u8BaseHigh1 != RT_BYTE3(RCPtrTSS)
+ || pDesc->Gen.u8BaseHigh2 != RT_BYTE4(RCPtrTSS)
+ || pDesc->Gen.u16LimitLow != sizeof(VBOXTSS) - 1
+ || pDesc->Gen.u4LimitHigh != 0
+ || (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
+ || pDesc->Gen.u1DescType != 0 /* system */
+ || pDesc->Gen.u2Dpl != 0 /* supervisor */
+ || pDesc->Gen.u1Present != 1
+ || pDesc->Gen.u1Available != 0
+ || pDesc->Gen.u1Long != 0
+ || pDesc->Gen.u1DefBig != 0
+ || pDesc->Gen.u1Granularity != 0 /* byte limit */
+ )
+ {
+ AssertFailed();
+ return false;
+ }
+# endif
+ return true;
+}
+# endif /* VBOX_WITH_SAFE_STR */
+
+#endif /* VBOX_WITH_RAW_MODE */
+
/**
* Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
*
@@ -2627,7 +2707,7 @@ static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const
*/
VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
{
- DBGFR3Info(pVM, "gdt", NULL, NULL);
+ DBGFR3Info(pVM->pUVM, "gdt", NULL, NULL);
}
@@ -2638,7 +2718,7 @@ VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
*/
VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
{
- DBGFR3Info(pVM, "ldt", NULL, NULL);
+ DBGFR3Info(pVM->pUVM, "ldt", NULL, NULL);
}
@@ -2649,7 +2729,7 @@ VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
*/
VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
{
- DBGFR3Info(pVM, "gdtguest", NULL, NULL);
+ DBGFR3Info(pVM->pUVM, "gdtguest", NULL, NULL);
}
@@ -2660,6 +2740,6 @@ VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
*/
VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
{
- DBGFR3Info(pVM, "ldtguest", NULL, NULL);
+ DBGFR3Info(pVM->pUVM, "ldtguest", NULL, NULL);
}
diff --git a/src/VBox/VMM/VMMR3/SSM.cpp b/src/VBox/VMM/VMMR3/SSM.cpp
index e303d0a6..5d77327a 100644
--- a/src/VBox/VMM/VMMR3/SSM.cpp
+++ b/src/VBox/VMM/VMMR3/SSM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -146,9 +146,12 @@
#define LOG_GROUP LOG_GROUP_SSM
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include "SSMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/version.h>
@@ -1091,7 +1094,7 @@ static DECLCALLBACK(int) ssmR3LiveControlLoadExec(PVM pVM, PSSMHANDLE pSSM, uint
AssertMsg(uPct < 100, ("uPct=%d uPartsPerTenThousand=%d uPercentPrepare=%d uPercentDone=%d\n", uPct, uPartsPerTenThousand, pSSM->uPercentPrepare, pSSM->uPercentDone));
pSSM->uPercent = uPct;
if (pSSM->pfnProgress)
- pSSM->pfnProgress(pVM, RT_MIN(uPct, 100 - pSSM->uPercentDone), pSSM->pvUser);
+ pSSM->pfnProgress(pVM->pUVM, RT_MIN(uPct, 100 - pSSM->uPercentDone), pSSM->pvUser);
}
}
return rc;
@@ -1231,10 +1234,12 @@ static int ssmR3Register(PVM pVM, const char *pszName, uint32_t uInstance,
* @param pfnLoadExec Execute load callback, optional.
* @param pfnLoadDone Done load callback, optional.
*/
-VMMR3DECL(int) SSMR3RegisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess, const char *pszBefore,
- PFNSSMDEVLIVEPREP pfnLivePrep, PFNSSMDEVLIVEEXEC pfnLiveExec, PFNSSMDEVLIVEVOTE pfnLiveVote,
- PFNSSMDEVSAVEPREP pfnSavePrep, PFNSSMDEVSAVEEXEC pfnSaveExec, PFNSSMDEVSAVEDONE pfnSaveDone,
- PFNSSMDEVLOADPREP pfnLoadPrep, PFNSSMDEVLOADEXEC pfnLoadExec, PFNSSMDEVLOADDONE pfnLoadDone)
+VMMR3_INT_DECL(int)
+SSMR3RegisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszName,
+ uint32_t uInstance, uint32_t uVersion, size_t cbGuess, const char *pszBefore,
+ PFNSSMDEVLIVEPREP pfnLivePrep, PFNSSMDEVLIVEEXEC pfnLiveExec, PFNSSMDEVLIVEVOTE pfnLiveVote,
+ PFNSSMDEVSAVEPREP pfnSavePrep, PFNSSMDEVSAVEEXEC pfnSaveExec, PFNSSMDEVSAVEDONE pfnSaveDone,
+ PFNSSMDEVLOADPREP pfnLoadPrep, PFNSSMDEVLOADEXEC pfnLoadExec, PFNSSMDEVLOADDONE pfnLoadDone)
{
PSSMUNIT pUnit;
int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, pszBefore, &pUnit);
@@ -1251,6 +1256,7 @@ VMMR3DECL(int) SSMR3RegisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszN
pUnit->u.Dev.pfnLoadExec = pfnLoadExec;
pUnit->u.Dev.pfnLoadDone = pfnLoadDone;
pUnit->u.Dev.pDevIns = pDevIns;
+ pUnit->pCritSect = PDMR3DevGetCritSect(pVM, pDevIns);
}
return rc;
}
@@ -1282,10 +1288,11 @@ VMMR3DECL(int) SSMR3RegisterDevice(PVM pVM, PPDMDEVINS pDevIns, const char *pszN
* @param pfnLoadExec Execute load callback, optional.
* @param pfnLoadDone Done load callback, optional.
*/
-VMMR3DECL(int) SSMR3RegisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
- PFNSSMDRVLIVEPREP pfnLivePrep, PFNSSMDRVLIVEEXEC pfnLiveExec, PFNSSMDRVLIVEVOTE pfnLiveVote,
- PFNSSMDRVSAVEPREP pfnSavePrep, PFNSSMDRVSAVEEXEC pfnSaveExec, PFNSSMDRVSAVEDONE pfnSaveDone,
- PFNSSMDRVLOADPREP pfnLoadPrep, PFNSSMDRVLOADEXEC pfnLoadExec, PFNSSMDRVLOADDONE pfnLoadDone)
+VMMR3_INT_DECL(int)
+SSMR3RegisterDriver(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+ PFNSSMDRVLIVEPREP pfnLivePrep, PFNSSMDRVLIVEEXEC pfnLiveExec, PFNSSMDRVLIVEVOTE pfnLiveVote,
+ PFNSSMDRVSAVEPREP pfnSavePrep, PFNSSMDRVSAVEEXEC pfnSaveExec, PFNSSMDRVSAVEDONE pfnSaveDone,
+ PFNSSMDRVLOADPREP pfnLoadPrep, PFNSSMDRVLOADEXEC pfnLoadExec, PFNSSMDRVLOADDONE pfnLoadDone)
{
PSSMUNIT pUnit;
int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL, &pUnit);
@@ -1361,7 +1368,7 @@ VMMR3DECL(int) SSMR3RegisterInternal(PVM pVM, const char *pszName, uint32_t uIns
*
* @returns VBox status.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszName Data unit name.
* @param uInstance The instance identifier of the data unit.
* This must together with the name be unique.
@@ -1382,11 +1389,15 @@ VMMR3DECL(int) SSMR3RegisterInternal(PVM pVM, const char *pszName, uint32_t uIns
* @param pfnLoadDone Done load callback, optional.
* @param pvUser User argument.
*/
-VMMR3DECL(int) SSMR3RegisterExternal(PVM pVM, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
+VMMR3DECL(int) SSMR3RegisterExternal(PUVM pUVM, const char *pszName, uint32_t uInstance, uint32_t uVersion, size_t cbGuess,
PFNSSMEXTLIVEPREP pfnLivePrep, PFNSSMEXTLIVEEXEC pfnLiveExec, PFNSSMEXTLIVEVOTE pfnLiveVote,
PFNSSMEXTSAVEPREP pfnSavePrep, PFNSSMEXTSAVEEXEC pfnSaveExec, PFNSSMEXTSAVEDONE pfnSaveDone,
PFNSSMEXTLOADPREP pfnLoadPrep, PFNSSMEXTLOADEXEC pfnLoadExec, PFNSSMEXTLOADDONE pfnLoadDone, void *pvUser)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
PSSMUNIT pUnit;
int rc = ssmR3Register(pVM, pszName, uInstance, uVersion, cbGuess, NULL, &pUnit);
if (RT_SUCCESS(rc))
@@ -1408,6 +1419,37 @@ VMMR3DECL(int) SSMR3RegisterExternal(PVM pVM, const char *pszName, uint32_t uIns
/**
+ * @callback_method_impl{FNSSMINTLOADEXEC,
+ * Stub that skips the whole unit (see SSMR3RegisterStub).}
+ */
+static DECLCALLBACK(int) ssmR3LoadExecStub(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uVersion); NOREF(uPass);
+ return SSMR3SkipToEndOfUnit(pSSM);
+}
+
+
+/**
+ * Registers a stub state loader for working around legacy.
+ *
+ * This is used to deal with irrelevant PATM and CSAM saved state units in HM
+ * mode and when built without raw-mode.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ * @param pszName Data unit name.
+ * @param uInstance Instance number.
+ */
+VMMR3DECL(int) SSMR3RegisterStub(PVM pVM, const char *pszName, uint32_t uInstance)
+{
+ return SSMR3RegisterInternal(pVM, pszName, uInstance, UINT32_MAX, 0,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ NULL, ssmR3LoadExecStub, NULL);
+}
+
+
+/**
* Deregister one or more PDM Device data units.
*
* @returns VBox status.
@@ -1637,8 +1679,12 @@ VMMR3DECL(int) SSMR3DeregisterInternal(PVM pVM, const char *pszName)
* @param pszName Data unit name.
* @remark Only for dynamic data units.
*/
-VMMR3DECL(int) SSMR3DeregisterExternal(PVM pVM, const char *pszName)
+VMMR3DECL(int) SSMR3DeregisterExternal(PUVM pUVM, const char *pszName)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
return ssmR3DeregisterByNameAndType(pVM, pszName, SSMUNITTYPE_EXTERNAL);
}
@@ -2978,7 +3024,7 @@ static void ssmR3ProgressByByte(PSSMHANDLE pSSM, uint64_t cbAdvance)
&& pSSM->uPercent <= 100 - pSSM->uPercentDone)
{
if (pSSM->pfnProgress)
- pSSM->pfnProgress(pSSM->pVM, pSSM->uPercent, pSSM->pvUser);
+ pSSM->pfnProgress(pSSM->pVM->pUVM, pSSM->uPercent, pSSM->pvUser);
pSSM->uPercent++;
pSSM->offEstProgress = (pSSM->uPercent - pSSM->uPercentPrepare - pSSM->uPercentLive) * pSSM->cbEstTotal
/ (100 - pSSM->uPercentDone - pSSM->uPercentPrepare - pSSM->uPercentLive);
@@ -3421,24 +3467,25 @@ VMMR3DECL(int) SSMR3PutStruct(PSSMHANDLE pSSM, const void *pvStruct, PCSSMFIELD
break;
case SSMFIELDTRANS_GCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutGCPtr(pSSM, *(PRTGCPTR)pbField);
break;
case SSMFIELDTRANS_GCPHYS:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutGCPhys(pSSM, *(PRTGCPHYS)pbField);
break;
case SSMFIELDTRANS_RCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutRCPtr(pSSM, *(PRTRCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR_ARRAY:
{
uint32_t const cEntries = pCur->cb / sizeof(RTRCPTR);
- AssertMsgReturn(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = VINF_SUCCESS;
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = SSMR3PutRCPtr(pSSM, ((PRTRCPTR)pbField)[i]);
@@ -3446,10 +3493,14 @@ VMMR3DECL(int) SSMR3PutStruct(PSSMHANDLE pSSM, const void *pvStruct, PCSSMFIELD
}
default:
- AssertMsgFailedReturn(("%#x\n", pCur->pfnGetPutOrTransformer), VERR_SSM_FIELD_COMPLEX);
+ AssertMsgFailedBreakStmt(("%#x\n", pCur->pfnGetPutOrTransformer), rc = VERR_SSM_FIELD_COMPLEX);
}
if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
return rc;
+ }
}
/* end marker */
@@ -3521,7 +3572,7 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
*/
SSM_ASSERT_WRITEABLE_RET(pSSM);
SSM_CHECK_CANCELLED_RET(pSSM);
- AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), pSSM->rc = VERR_INVALID_PARAMETER);
AssertPtr(pvStruct);
AssertPtr(paFields);
@@ -3539,6 +3590,7 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
/*
* Put the fields
*/
+ rc = VINF_SUCCESS;
uint32_t off = 0;
for (PCSSMFIELD pCur = paFields;
pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
@@ -3553,15 +3605,15 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
: SSMFIELDTRANS_IS_PADDING(pCur->pfnGetPutOrTransformer)
? RT_HIWORD(pCur->cb)
: pCur->cb;
- AssertMsgReturn( cbField <= cbStruct
- && offField + cbField <= cbStruct
- && offField + cbField >= offField,
- ("off=%#x cb=%#x cbStruct=%#x (%s)\n", cbField, offField, cbStruct, pCur->pszName),
- VERR_SSM_FIELD_OUT_OF_BOUNDS);
- AssertMsgReturn( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
- || off == offField,
- ("off=%#x offField=%#x (%s)\n", off, offField, pCur->pszName),
- VERR_SSM_FIELD_NOT_CONSECUTIVE);
+ AssertMsgBreakStmt( cbField <= cbStruct
+ && offField + cbField <= cbStruct
+ && offField + cbField >= offField,
+ ("off=%#x cb=%#x cbStruct=%#x (%s)\n", cbField, offField, cbStruct, pCur->pszName),
+ rc = VERR_SSM_FIELD_OUT_OF_BOUNDS);
+ AssertMsgBreakStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == offField,
+ ("off=%#x offField=%#x (%s)\n", off, offField, pCur->pszName),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
rc = VINF_SUCCESS;
uint8_t const *pbField = (uint8_t const *)pvStruct + offField;
@@ -3572,53 +3624,60 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
break;
case SSMFIELDTRANS_GCPHYS:
- AssertMsgReturn(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutGCPhys(pSSM, *(PRTGCPHYS)pbField);
break;
case SSMFIELDTRANS_GCPTR:
- AssertMsgReturn(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutGCPtr(pSSM, *(PRTGCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR:
- AssertMsgReturn(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3PutRCPtr(pSSM, *(PRTRCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR_ARRAY:
{
uint32_t const cEntries = cbField / sizeof(RTRCPTR);
- AssertMsgReturn(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = SSMR3PutRCPtr(pSSM, ((PRTRCPTR)pbField)[i]);
break;
}
case SSMFIELDTRANS_HCPTR_NI:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3PutHCPtrNI(pSSM, *(void * const *)pbField, fFlags);
break;
case SSMFIELDTRANS_HCPTR_NI_ARRAY:
{
uint32_t const cEntries = cbField / sizeof(void *);
- AssertMsgReturn(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = ssmR3PutHCPtrNI(pSSM, ((void * const *)pbField)[i], fFlags);
break;
}
case SSMFIELDTRANS_HCPTR_HACK_U32:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
- AssertMsgReturn(*(uintptr_t *)pbField <= UINT32_MAX, ("%p (%s)\n", *(uintptr_t *)pbField, pCur->pszName), VERR_SSM_FIELD_INVALID_VALUE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(*(uintptr_t *)pbField <= UINT32_MAX, ("%p (%s)\n", *(uintptr_t *)pbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_VALUE);
rc = ssmR3DataWrite(pSSM, pbField, sizeof(uint32_t));
- if ((fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE) && sizeof(void *) != sizeof(uint32_t))
+ if ((fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE) && sizeof(void *) != sizeof(uint32_t) && RT_SUCCESS(rc))
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(uint32_t));
break;
case SSMFIELDTRANS_U32_ZX_U64:
- AssertFailedReturn(VERR_SSM_FIELD_LOAD_ONLY_TRANSFORMATION);
+ AssertFailedBreakStmt(rc = VERR_SSM_FIELD_LOAD_ONLY_TRANSFORMATION);
break;
case SSMFIELDTRANS_IGNORE:
@@ -3627,62 +3686,68 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
break;
case SSMFIELDTRANS_IGN_GCPHYS:
- AssertMsgReturn(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPHYS));
break;
case SSMFIELDTRANS_IGN_GCPTR:
- AssertMsgReturn(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPTR));
break;
case SSMFIELDTRANS_IGN_RCPTR:
- AssertMsgReturn(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTRCPTR));
break;
case SSMFIELDTRANS_IGN_HCPTR:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(void *));
break;
case SSMFIELDTRANS_OLD:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3PutZeros(pSSM, pCur->cb);
break;
case SSMFIELDTRANS_OLD_GCPHYS:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPHYS));
break;
case SSMFIELDTRANS_OLD_GCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTGCPTR));
break;
case SSMFIELDTRANS_OLD_RCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(RTRCPTR));
break;
case SSMFIELDTRANS_OLD_HCPTR:
- AssertMsgReturn(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3DataWrite(pSSM, g_abZero, sizeof(void *));
break;
case SSMFIELDTRANS_OLD_PAD_HC:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3PutZeros(pSSM, HC_ARCH_BITS == 64 ? RT_HIWORD(pCur->cb) : RT_LOWORD(pCur->cb));
break;
case SSMFIELDTRANS_OLD_PAD_MSC32:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_SIZE);
if (SSM_HOST_IS_MSC_32)
rc = ssmR3PutZeros(pSSM, pCur->cb);
break;
@@ -3704,37 +3769,46 @@ VMMR3DECL(int) SSMR3PutStructEx(PSSMHANDLE pSSM, const void *pvStruct, size_t cb
|| ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
&& !ssmR3IsHostMsc32(pSSM))
? cb64 : cb32;
- AssertMsgReturn( cbField == cbCtx
- && ( ( pCur->off == UINT32_MAX / 2
- && ( cbField == 0
- || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
- || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
- )
+ AssertMsgBreakStmt( cbField == cbCtx
+ && ( ( pCur->off == UINT32_MAX / 2
+ && ( cbField == 0
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ )
+ )
+ || (pCur->off != UINT32_MAX / 2 && cbField != 0)
)
- || (pCur->off != UINT32_MAX / 2 && cbField != 0)
- )
- , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
- cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
- VERR_SSM_FIELD_INVALID_PADDING_SIZE);
+ , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
+ cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
+ rc = VERR_SSM_FIELD_INVALID_PADDING_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = ssmR3PutZeros(pSSM, cbSaved);
break;
}
default:
- AssertPtrReturn(pCur->pfnGetPutOrTransformer, VERR_SSM_FIELD_INVALID_CALLBACK);
+ AssertPtrBreakStmt(pCur->pfnGetPutOrTransformer, rc = VERR_SSM_FIELD_INVALID_CALLBACK);
rc = pCur->pfnGetPutOrTransformer(pSSM, pCur, (void *)pvStruct, fFlags, false /*fGetOrPut*/, pvUser);
break;
}
if (RT_FAILURE(rc))
- return rc;
+ break; /* Deal with failures in one place (see below). */
off = offField + cbField;
}
- AssertMsgReturn( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
- || off == cbStruct,
- ("off=%#x cbStruct=%#x\n", off, cbStruct),
- VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_SUCCESS(rc))
+ AssertMsgStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == cbStruct,
+ ("off=%#x cbStruct=%#x\n", off, cbStruct),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
/*
* End marker
@@ -4229,6 +4303,39 @@ static int ssmR3LiveControlEmit(PSSMHANDLE pSSM, long double lrdPct, uint32_t uP
}
+
+/**
+ * Enters the critical session (optionally) associated with the unit.
+ *
+ * @param pUnit The unit.
+ */
+DECLINLINE(void) ssmR3UnitCritSectEnter(PSSMUNIT pUnit)
+{
+ PPDMCRITSECT pCritSect = pUnit->pCritSect;
+ if (pCritSect)
+ {
+ int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
+ AssertRC(rc);
+ }
+}
+
+
+/**
+ * Leaves the critical session (optionally) associated with the unit.
+ *
+ * @param pUnit The unit.
+ */
+DECLINLINE(void) ssmR3UnitCritSectLeave(PSSMUNIT pUnit)
+{
+ PPDMCRITSECT pCritSect = pUnit->pCritSect;
+ if (pCritSect)
+ {
+ int rc = PDMCritSectLeave(pCritSect);
+ AssertRC(rc);
+ }
+}
+
+
/**
* Do the pfnSaveDone run.
*
@@ -4252,6 +4359,7 @@ static int ssmR3SaveDoDoneRun(PVM pVM, PSSMHANDLE pSSM)
{
int rcOld = pSSM->rc;
int rc;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -4270,6 +4378,7 @@ static int ssmR3SaveDoDoneRun(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
if (RT_SUCCESS(rc) && pSSM->rc != rcOld)
rc = pSSM->rc;
if (RT_FAILURE(rc))
@@ -4308,7 +4417,7 @@ static int ssmR3SaveDoClose(PVM pVM, PSSMHANDLE pSSM)
{
Assert(pSSM->enmOp == SSMSTATE_SAVE_DONE);
if (pSSM->pfnProgress)
- pSSM->pfnProgress(pVM, 100, pSSM->pvUser);
+ pSSM->pfnProgress(pVM->pUVM, 100, pSSM->pvUser);
LogRel(("SSM: Successfully saved the VM state to '%s'\n",
pSSM->pszFilename ? pSSM->pszFilename : "<remote-machine>"));
}
@@ -4524,7 +4633,7 @@ static void ssmR3ProgressByUnit(PSSMHANDLE pSSM, uint32_t iUnit)
{
ssmR3LiveControlEmit(pSSM, lrdPct, SSM_PASS_FINAL);
pSSM->uPercent = uPct;
- pSSM->pfnProgress(pSSM->pVM, uPct, pSSM->pvUser);
+ pSSM->pfnProgress(pSSM->pVM->pUVM, uPct, pSSM->pvUser);
}
}
}
@@ -4599,6 +4708,7 @@ static int ssmR3SaveDoExecRun(PVM pVM, PSSMHANDLE pSSM)
* Call the execute handler.
*/
ssmR3DataWriteBegin(pSSM);
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -4618,6 +4728,7 @@ static int ssmR3SaveDoExecRun(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -4687,6 +4798,7 @@ static int ssmR3SaveDoPrepRun(PVM pVM, PSSMHANDLE pSSM)
if (pUnit->u.Common.pfnSavePrep)
{
int rc;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -4705,6 +4817,7 @@ static int ssmR3SaveDoPrepRun(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -4724,7 +4837,7 @@ static int ssmR3SaveDoPrepRun(PVM pVM, PSSMHANDLE pSSM)
* Work the progress indicator if we got one.
*/
if (pSSM->pfnProgress)
- pSSM->pfnProgress(pVM, pSSM->uPercentPrepare + pSSM->uPercentLive - 1, pSSM->pvUser);
+ pSSM->pfnProgress(pVM->pUVM, pSSM->uPercentPrepare + pSSM->uPercentLive - 1, pSSM->pvUser);
pSSM->uPercent = pSSM->uPercentPrepare + pSSM->uPercentLive;
return VINF_SUCCESS;
@@ -5028,6 +5141,7 @@ static int ssmR3LiveDoVoteRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
&& !pUnit->fDoneLive)
{
int rc;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -5046,6 +5160,7 @@ static int ssmR3LiveDoVoteRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
Assert(pSSM->rc == VINF_SUCCESS);
if (rc != VINF_SUCCESS)
@@ -5096,7 +5211,7 @@ static int ssmR3LiveDoVoteRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
ssmR3LiveControlEmit(pSSM, lrdPct, uPass);
pSSM->uPercent = uPct;
- pSSM->pfnProgress(pVM, uPct, pSSM->pvUser);
+ pSSM->pfnProgress(pVM->pUVM, uPct, pSSM->pvUser);
}
}
}
@@ -5166,6 +5281,7 @@ static int ssmR3LiveDoExecRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
* Call the execute handler.
*/
ssmR3DataWriteBegin(pSSM);
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -5184,6 +5300,7 @@ static int ssmR3LiveDoExecRun(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -5323,6 +5440,7 @@ static int ssmR3DoLivePrepRun(PVM pVM, PSSMHANDLE pSSM)
if (pUnit->u.Common.pfnLivePrep)
{
int rc;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -5341,6 +5459,7 @@ static int ssmR3DoLivePrepRun(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -5360,7 +5479,7 @@ static int ssmR3DoLivePrepRun(PVM pVM, PSSMHANDLE pSSM)
* Work the progress indicator if we got one.
*/
if (pSSM->pfnProgress)
- pSSM->pfnProgress(pVM, 2, pSSM->pvUser);
+ pSSM->pfnProgress(pVM->pUVM, 2, pSSM->pvUser);
pSSM->uPercent = 2;
return VINF_SUCCESS;
@@ -5720,7 +5839,7 @@ DECLINLINE(int) ssmR3DataReadV2RawLzfHdr(PSSMHANDLE pSSM, uint32_t *pcbDecompr)
AssertLogRelMsgReturn( pSSM->u.Read.cbRecLeft > 1
&& pSSM->u.Read.cbRecLeft <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abComprBuffer) + 2,
("%#x\n", pSSM->u.Read.cbRecLeft),
- VERR_SSM_INTEGRITY_DECOMPRESSION);
+ pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
uint8_t cKB;
int rc = ssmR3DataReadV2Raw(pSSM, &cKB, 1);
@@ -5732,7 +5851,7 @@ DECLINLINE(int) ssmR3DataReadV2RawLzfHdr(PSSMHANDLE pSSM, uint32_t *pcbDecompr)
AssertLogRelMsgReturn( cbDecompr >= pSSM->u.Read.cbRecLeft
&& cbDecompr <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abDataBuffer),
("%#x\n", cbDecompr),
- VERR_SSM_INTEGRITY_DECOMPRESSION);
+ pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
*pcbDecompr = cbDecompr;
return VINF_SUCCESS;
@@ -5780,7 +5899,7 @@ static int ssmR3DataReadV2RawLzf(PSSMHANDLE pSSM, void *pvDst, size_t cbDecompr)
pvDst, cbDecompr, &cbDstActual);
if (RT_SUCCESS(rc))
{
- AssertLogRelMsgReturn(cbDstActual == cbDecompr, ("%#x %#x\n", cbDstActual, cbDecompr), VERR_SSM_INTEGRITY_DECOMPRESSION);
+ AssertLogRelMsgReturn(cbDstActual == cbDecompr, ("%#x %#x\n", cbDstActual, cbDecompr), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
return VINF_SUCCESS;
}
@@ -5799,7 +5918,7 @@ static int ssmR3DataReadV2RawLzf(PSSMHANDLE pSSM, void *pvDst, size_t cbDecompr)
DECLINLINE(int) ssmR3DataReadV2RawZeroHdr(PSSMHANDLE pSSM, uint32_t *pcbZero)
{
*pcbZero = 0; /* shuts up gcc. */
- AssertLogRelMsgReturn(pSSM->u.Read.cbRecLeft == 1, ("%#x\n", pSSM->u.Read.cbRecLeft), VERR_SSM_INTEGRITY_DECOMPRESSION);
+ AssertLogRelMsgReturn(pSSM->u.Read.cbRecLeft == 1, ("%#x\n", pSSM->u.Read.cbRecLeft), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
uint8_t cKB;
int rc = ssmR3DataReadV2Raw(pSSM, &cKB, 1);
@@ -5809,7 +5928,7 @@ DECLINLINE(int) ssmR3DataReadV2RawZeroHdr(PSSMHANDLE pSSM, uint32_t *pcbZero)
uint32_t cbZero = (uint32_t)cKB * _1K;
AssertLogRelMsgReturn(cbZero <= RT_SIZEOFMEMB(SSMHANDLE, u.Read.abDataBuffer),
- ("%#x\n", cbZero), VERR_SSM_INTEGRITY_DECOMPRESSION);
+ ("%#x\n", cbZero), pSSM->rc = VERR_SSM_INTEGRITY_DECOMPRESSION);
*pcbZero = cbZero;
return VINF_SUCCESS;
@@ -5988,6 +6107,7 @@ static int ssmR3DataReadRecHdrV2(PSSMHANDLE pSSM)
/**
* Buffer miss, do an unbuffered read.
*
+ * @returns VBox status code. Sets pSSM->rc on error.
* @param pSSM The saved state handle.
* @param pvBuf Where to store the read data.
* @param cbBuf Number of bytes to read.
@@ -6089,7 +6209,7 @@ static int ssmR3DataReadUnbufferedV2(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
}
default:
- AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), VERR_SSM_BAD_REC_TYPE);
+ AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), pSSM->rc = VERR_SSM_BAD_REC_TYPE);
}
pSSM->offUnitUser += cbToRead;
@@ -6192,7 +6312,7 @@ static int ssmR3DataReadBufferedV2(PSSMHANDLE pSSM, void *pvBuf, size_t cbBuf)
}
default:
- AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), VERR_SSM_BAD_REC_TYPE);
+ AssertMsgFailedReturn(("%x\n", pSSM->u.Read.u8TypeAndFlags), pSSM->rc = VERR_SSM_BAD_REC_TYPE);
}
/*pSSM->u.Read.offDataBuffer = 0;*/
@@ -6277,7 +6397,7 @@ VMMR3DECL(int) SSMR3GetStruct(PSSMHANDLE pSSM, void *pvStruct, PCSSMFIELD paFiel
int rc = SSMR3GetU32(pSSM, &u32Magic);
if (RT_FAILURE(rc))
return rc;
- AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), VERR_SSM_STRUCTURE_MAGIC);
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
/* get the fields */
for (PCSSMFIELD pCur = paFields;
@@ -6292,24 +6412,24 @@ VMMR3DECL(int) SSMR3GetStruct(PSSMHANDLE pSSM, void *pvStruct, PCSSMFIELD paFiel
break;
case SSMFIELDTRANS_GCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetGCPtr(pSSM, (PRTGCPTR)pbField);
break;
case SSMFIELDTRANS_GCPHYS:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetGCPhys(pSSM, (PRTGCPHYS)pbField);
break;
case SSMFIELDTRANS_RCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR), ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetRCPtr(pSSM, (PRTRCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR_ARRAY:
{
uint32_t const cEntries = pCur->cb / sizeof(RTRCPTR);
- AssertMsgReturn(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", pCur->cb, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = VINF_SUCCESS;
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = SSMR3GetRCPtr(pSSM, &((PRTRCPTR)pbField)[i]);
@@ -6317,17 +6437,21 @@ VMMR3DECL(int) SSMR3GetStruct(PSSMHANDLE pSSM, void *pvStruct, PCSSMFIELD paFiel
}
default:
- AssertMsgFailedReturn(("%#x\n", pCur->pfnGetPutOrTransformer), VERR_SSM_FIELD_COMPLEX);
+ AssertMsgFailedBreakStmt(("%#x\n", pCur->pfnGetPutOrTransformer), rc = VERR_SSM_FIELD_COMPLEX);
}
if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
return rc;
+ }
}
/* end marker */
rc = SSMR3GetU32(pSSM, &u32Magic);
if (RT_FAILURE(rc))
return rc;
- AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), VERR_SSM_STRUCTURE_MAGIC);
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
return rc;
}
@@ -6400,7 +6524,7 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
*/
SSM_ASSERT_READABLE_RET(pSSM);
SSM_CHECK_CANCELLED_RET(pSSM);
- AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(!(fFlags & ~SSMSTRUCT_FLAGS_VALID_MASK), ("%#x\n", fFlags), pSSM->rc = VERR_INVALID_PARAMETER);
AssertPtr(pvStruct);
AssertPtr(paFields);
@@ -6412,12 +6536,13 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
rc = SSMR3GetU32(pSSM, &u32Magic);
if (RT_FAILURE(rc))
return rc;
- AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), VERR_SSM_STRUCTURE_MAGIC);
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_BEGIN, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
}
/*
* Put the fields
*/
+ rc = VINF_SUCCESS;
uint32_t off = 0;
for (PCSSMFIELD pCur = paFields;
pCur->cb != UINT32_MAX && pCur->off != UINT32_MAX;
@@ -6436,11 +6561,11 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
&& offField + cbField <= cbStruct
&& offField + cbField >= offField,
("off=%#x cb=%#x cbStruct=%#x (%s)\n", cbField, offField, cbStruct, pCur->pszName),
- VERR_SSM_FIELD_OUT_OF_BOUNDS);
+ pSSM->rc = VERR_SSM_FIELD_OUT_OF_BOUNDS);
AssertMsgReturn( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
|| off == offField,
("off=%#x offField=%#x (%s)\n", off, offField, pCur->pszName),
- VERR_SSM_FIELD_NOT_CONSECUTIVE);
+ pSSM->rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
rc = VINF_SUCCESS;
uint8_t *pbField = (uint8_t *)pvStruct + offField;
@@ -6451,24 +6576,24 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
break;
case SSMFIELDTRANS_GCPHYS:
- AssertMsgReturn(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetGCPhys(pSSM, (PRTGCPHYS)pbField);
break;
case SSMFIELDTRANS_GCPTR:
- AssertMsgReturn(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetGCPtr(pSSM, (PRTGCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR:
- AssertMsgReturn(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3GetRCPtr(pSSM, (PRTRCPTR)pbField);
break;
case SSMFIELDTRANS_RCPTR_ARRAY:
{
uint32_t const cEntries = cbField / sizeof(RTRCPTR);
- AssertMsgReturn(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(RTRCPTR) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = VINF_SUCCESS;
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = SSMR3GetRCPtr(pSSM, &((PRTRCPTR)pbField)[i]);
@@ -6476,14 +6601,14 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
}
case SSMFIELDTRANS_HCPTR_NI:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = ssmR3GetHCPtrNI(pSSM, (void **)pbField, fFlags);
break;
case SSMFIELDTRANS_HCPTR_NI_ARRAY:
{
uint32_t const cEntries = cbField / sizeof(void *);
- AssertMsgReturn(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == cEntries * sizeof(void *) && cEntries, ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = VINF_SUCCESS;
for (uint32_t i = 0; i < cEntries && RT_SUCCESS(rc); i++)
rc = ssmR3GetHCPtrNI(pSSM, &((void **)pbField)[i], fFlags);
@@ -6491,21 +6616,21 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
}
case SSMFIELDTRANS_HCPTR_HACK_U32:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
*(uintptr_t *)pbField = 0;
rc = ssmR3DataRead(pSSM, pbField, sizeof(uint32_t));
if ((fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE) && ssmR3GetHostBits(pSSM) == 64)
{
uint32_t u32;
rc = ssmR3DataRead(pSSM, &u32, sizeof(uint32_t));
- AssertMsgReturn(RT_FAILURE(rc) || u32 == 0 || (fFlags & SSMSTRUCT_FLAGS_SAVED_AS_MEM),
- ("high=%#x low=%#x (%s)\n", u32, *(uint32_t *)pbField, pCur->pszName),
- VERR_SSM_FIELD_INVALID_VALUE);
+ AssertMsgBreakStmt(RT_FAILURE(rc) || u32 == 0 || (fFlags & SSMSTRUCT_FLAGS_SAVED_AS_MEM),
+ ("high=%#x low=%#x (%s)\n", u32, *(uint32_t *)pbField, pCur->pszName),
+ rc = VERR_SSM_FIELD_INVALID_VALUE);
}
break;
case SSMFIELDTRANS_U32_ZX_U64:
- AssertMsgReturn(cbField == sizeof(uint64_t), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(uint64_t), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
((uint32_t *)pbField)[1] = 0;
rc = SSMR3GetU32(pSSM, (uint32_t *)pbField);
break;
@@ -6517,62 +6642,62 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
break;
case SSMFIELDTRANS_IGN_GCPHYS:
- AssertMsgReturn(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPHYS), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPhys);
break;
case SSMFIELDTRANS_IGN_GCPTR:
- AssertMsgReturn(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTGCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPtr);
break;
case SSMFIELDTRANS_IGN_RCPTR:
- AssertMsgReturn(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(RTRCPTR), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = SSMR3Skip(pSSM, sizeof(RTRCPTR));
break;
case SSMFIELDTRANS_IGN_HCPTR:
- AssertMsgReturn(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(cbField == sizeof(void *), ("%#x (%s)\n", cbField, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) / 8);
break;
case SSMFIELDTRANS_OLD:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, pCur->cb);
break;
case SSMFIELDTRANS_OLD_GCPHYS:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPHYS) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPhys);
break;
case SSMFIELDTRANS_OLD_GCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTGCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, pSSM->u.Read.cbGCPtr);
break;
case SSMFIELDTRANS_OLD_RCPTR:
- AssertMsgReturn(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(RTRCPTR) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, sizeof(RTRCPTR));
break;
case SSMFIELDTRANS_OLD_HCPTR:
- AssertMsgReturn(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->cb == sizeof(void *) && pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) / 8);
break;
case SSMFIELDTRANS_OLD_PAD_HC:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
rc = SSMR3Skip(pSSM, ssmR3GetHostBits(pSSM) == 64 ? RT_HIWORD(pCur->cb) : RT_LOWORD(pCur->cb));
break;
case SSMFIELDTRANS_OLD_PAD_MSC32:
- AssertMsgReturn(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), VERR_SSM_FIELD_INVALID_SIZE);
+ AssertMsgBreakStmt(pCur->off == UINT32_MAX / 2, ("%#x %#x (%s)\n", pCur->cb, pCur->off, pCur->pszName), rc = VERR_SSM_FIELD_INVALID_SIZE);
if (ssmR3IsHostMsc32(pSSM))
rc = SSMR3Skip(pSSM, pCur->cb);
break;
@@ -6594,37 +6719,46 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
|| ( (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
&& !ssmR3IsHostMsc32(pSSM))
? cb64 : cb32;
- AssertMsgReturn( cbField == cbCtx
- && ( ( pCur->off == UINT32_MAX / 2
- && ( cbField == 0
- || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
- || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
- )
+ AssertMsgBreakStmt( cbField == cbCtx
+ && ( ( pCur->off == UINT32_MAX / 2
+ && ( cbField == 0
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_HC_AUTO
+ || (uintptr_t)pCur->pfnGetPutOrTransformer == SSMFIELDTRANS_PAD_MSC32_AUTO
+ )
+ )
+ || (pCur->off != UINT32_MAX / 2 && cbField != 0)
)
- || (pCur->off != UINT32_MAX / 2 && cbField != 0)
- )
- , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
- cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
- VERR_SSM_FIELD_INVALID_PADDING_SIZE);
+ , ("cbField=%#x cb32=%#x cb64=%#x HC_ARCH_BITS=%u cbCtx=%#x cbSaved=%#x off=%#x\n",
+ cbField, cb32, cb64, HC_ARCH_BITS, cbCtx, cbSaved, pCur->off),
+ rc = VERR_SSM_FIELD_INVALID_PADDING_SIZE);
if (fFlags & SSMSTRUCT_FLAGS_DONT_IGNORE)
rc = SSMR3Skip(pSSM, cbSaved);
break;
}
default:
- AssertPtrReturn(pCur->pfnGetPutOrTransformer, VERR_SSM_FIELD_INVALID_CALLBACK);
+ AssertBreakStmt(pCur->pfnGetPutOrTransformer, rc = VERR_SSM_FIELD_INVALID_CALLBACK);
rc = pCur->pfnGetPutOrTransformer(pSSM, pCur, pvStruct, fFlags, true /*fGetOrPut*/, pvUser);
break;
}
if (RT_FAILURE(rc))
- return rc;
+ break;
off = offField + cbField;
}
- AssertMsgReturn( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
- || off == cbStruct,
- ("off=%#x cbStruct=%#x\n", off, cbStruct),
- VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_SUCCESS(rc))
+ AssertMsgStmt( !(fFlags & SSMSTRUCT_FLAGS_FULL_STRUCT)
+ || off == cbStruct,
+ ("off=%#x cbStruct=%#x\n", off, cbStruct),
+ rc = VERR_SSM_FIELD_NOT_CONSECUTIVE);
+
+ if (RT_FAILURE(rc))
+ {
+ if (RT_SUCCESS(pSSM->rc))
+ pSSM->rc = rc;
+ return rc;
+ }
/*
* End marker
@@ -6634,7 +6768,7 @@ VMMR3DECL(int) SSMR3GetStructEx(PSSMHANDLE pSSM, void *pvStruct, size_t cbStruct
rc = SSMR3GetU32(pSSM, &u32Magic);
if (RT_FAILURE(rc))
return rc;
- AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), VERR_SSM_STRUCTURE_MAGIC);
+ AssertMsgReturn(u32Magic == SSMR3STRUCT_END, ("u32Magic=%#RX32\n", u32Magic), pSSM->rc = VERR_SSM_STRUCTURE_MAGIC);
}
return VINF_SUCCESS;
@@ -7908,6 +8042,7 @@ static int ssmR3LoadExecV1(PVM pVM, PSSMHANDLE pSSM)
pSSM->rc = rc = VERR_SSM_NO_LOAD_EXEC;
break;
}
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -7926,6 +8061,7 @@ static int ssmR3LoadExecV1(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -8171,6 +8307,7 @@ static int ssmR3LoadExecV2(PVM pVM, PSSMHANDLE pSSM)
pSSM->u.Read.uCurUnitPass = UnitHdr.u32Pass;
pSSM->u.Read.pCurUnit = pUnit;
ssmR3DataReadBeginV2(pSSM);
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -8189,6 +8326,7 @@ static int ssmR3LoadExecV2(PVM pVM, PSSMHANDLE pSSM)
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
pUnit->fCalled = true;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(pSSM->rc))
pSSM->rc = rc;
@@ -8321,7 +8459,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
Handle.u.Read.cHostBits, Handle.u.Read.cbGCPhys, Handle.u.Read.cbGCPtr));
if (pfnProgress)
- pfnProgress(pVM, Handle.uPercent, pvProgressUser);
+ pfnProgress(pVM->pUVM, Handle.uPercent, pvProgressUser);
/*
* Clear the per unit flags.
@@ -8341,6 +8479,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
{
Handle.u.Read.pCurUnit = pUnit;
pUnit->fCalled = true;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -8359,6 +8498,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
Handle.u.Read.pCurUnit = NULL;
if (RT_FAILURE(rc) && RT_SUCCESS_NP(Handle.rc))
Handle.rc = rc;
@@ -8374,7 +8514,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
/* end of prepare % */
if (pfnProgress)
- pfnProgress(pVM, Handle.uPercentPrepare - 1, pvProgressUser);
+ pfnProgress(pVM->pUVM, Handle.uPercentPrepare - 1, pvProgressUser);
Handle.uPercent = Handle.uPercentPrepare;
Handle.cbEstTotal = Handle.u.Read.cbLoadFile;
Handle.offEstUnitEnd = Handle.u.Read.cbLoadFile;
@@ -8412,6 +8552,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
Handle.u.Read.pCurUnit = pUnit;
int const rcOld = Handle.rc;
rc = VINF_SUCCESS;
+ ssmR3UnitCritSectEnter(pUnit);
switch (pUnit->enmType)
{
case SSMUNITTYPE_DEV:
@@ -8430,6 +8571,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
rc = VERR_SSM_IPE_1;
break;
}
+ ssmR3UnitCritSectLeave(pUnit);
Handle.u.Read.pCurUnit = NULL;
if (RT_SUCCESS(rc) && Handle.rc != rcOld)
rc = Handle.rc;
@@ -8448,7 +8590,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
/* progress */
if (pfnProgress)
- pfnProgress(pVM, 99, pvProgressUser);
+ pfnProgress(pVM->pUVM, 99, pvProgressUser);
ssmR3SetCancellable(pVM, &Handle, false);
ssmR3StrmClose(&Handle.Strm, Handle.rc == VERR_SSM_CANCELLED);
@@ -8462,7 +8604,7 @@ VMMR3DECL(int) SSMR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamO
{
/* progress */
if (pfnProgress)
- pfnProgress(pVM, 100, pvProgressUser);
+ pfnProgress(pVM->pUVM, 100, pvProgressUser);
Log(("SSM: Load of '%s' completed!\n", pszFilename));
}
return rc;
@@ -9131,12 +9273,14 @@ VMMR3DECL(const char *) SSMR3HandleHostOSAndArch(PSSMHANDLE pSSM)
* @retval VERR_SSM_ALREADY_CANCELLED if the operation as already been
* cancelled.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
*
* @thread Any.
*/
-VMMR3DECL(int) SSMR3Cancel(PVM pVM)
+VMMR3DECL(int) SSMR3Cancel(PUVM pUVM)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
int rc = RTCritSectEnter(&pVM->ssm.s.CancelCritSect);
diff --git a/src/VBox/VMM/VMMR3/STAM.cpp b/src/VBox/VMM/VMMR3/STAM.cpp
index 3d68f2f7..6b47ef64 100644
--- a/src/VBox/VMM/VMMR3/STAM.cpp
+++ b/src/VBox/VMM/VMMR3/STAM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -58,12 +58,19 @@
#include <iprt/assert.h>
#include <iprt/asm.h>
-#include <iprt/alloc.h>
+#include <iprt/mem.h>
#include <iprt/stream.h>
#include <iprt/string.h>
/*******************************************************************************
+* Defined Constants And Macros *
+*******************************************************************************/
+/** The maximum name length excluding the terminator. */
+#define STAM_MAX_NAME_LEN 239
+
+
+/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
@@ -71,8 +78,8 @@
*/
typedef struct STAMR3PRINTONEARGS
{
- PVM pVM;
- void *pvArg;
+ PUVM pUVM;
+ void *pvArg;
DECLCALLBACKMEMBER(void, pfnPrintf)(struct STAMR3PRINTONEARGS *pvArg, const char *pszFormat, ...);
} STAMR3PRINTONEARGS, *PSTAMR3PRINTONEARGS;
@@ -132,6 +139,9 @@ typedef struct STAMR0SAMPLE
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
+#ifdef STAM_WITH_LOOKUP_TREE
+static void stamR3LookupDestroyTree(PSTAMLOOKUP pRoot);
+#endif
static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint,
STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc);
static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg);
@@ -150,9 +160,9 @@ static void stamR3Ring0StatsUpdateU(PUVM pUVM, const char *pszPa
static void stamR3Ring0StatsUpdateMultiU(PUVM pUVM, const char * const *papszExpressions, unsigned cExpressions);
#ifdef VBOX_WITH_DEBUGGER
-static DECLCALLBACK(int) stamR3CmdStats(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD stamR3CmdStats;
static DECLCALLBACK(void) stamR3EnumDbgfPrintf(PSTAMR3PRINTONEARGS pArgs, const char *pszFormat, ...);
-static DECLCALLBACK(int) stamR3CmdStatsReset(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
+static FNDBGCCMD stamR3CmdStatsReset;
#endif
@@ -249,8 +259,8 @@ static const STAMR0SAMPLE g_aGMMStats[] =
{ RT_UOFFSETOF(GMMSTATS, VMStats.enmPolicy), STAMTYPE_U32, STAMUNIT_NONE, "/GMM/VM/enmPolicy", "The current over-commit policy." },
{ RT_UOFFSETOF(GMMSTATS, VMStats.enmPriority), STAMTYPE_U32, STAMUNIT_NONE, "/GMM/VM/enmPriority", "The VM priority for arbitrating VMs in low and out of memory situation." },
{ RT_UOFFSETOF(GMMSTATS, VMStats.fBallooningEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fBallooningEnabled", "Whether ballooning is enabled or not." },
- { RT_UOFFSETOF(GMMSTATS, VMStats.fBallooningEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fSharedPagingEnabled", "Whether shared paging is enabled or not." },
- { RT_UOFFSETOF(GMMSTATS, VMStats.fBallooningEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fMayAllocate", "Whether the VM is allowed to allocate memory or not." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.fSharedPagingEnabled), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fSharedPagingEnabled", "Whether shared paging is enabled or not." },
+ { RT_UOFFSETOF(GMMSTATS, VMStats.fMayAllocate), STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fMayAllocate", "Whether the VM is allowed to allocate memory or not." },
};
@@ -271,11 +281,38 @@ VMMR3DECL(int) STAMR3InitUVM(PUVM pUVM)
AssertRelease(sizeof(pUVM->stam.s) <= sizeof(pUVM->stam.padding));
/*
- * Setup any fixed pointers and offsets.
+ * Initialize the read/write lock and list.
*/
int rc = RTSemRWCreate(&pUVM->stam.s.RWSem);
AssertRCReturn(rc, rc);
+ RTListInit(&pUVM->stam.s.List);
+
+#ifdef STAM_WITH_LOOKUP_TREE
+ /*
+ * Initialize the root node.
+ */
+ PSTAMLOOKUP pRoot = (PSTAMLOOKUP)RTMemAlloc(sizeof(STAMLOOKUP));
+ if (!pRoot)
+ {
+ RTSemRWDestroy(pUVM->stam.s.RWSem);
+ pUVM->stam.s.RWSem = NIL_RTSEMRW;
+ return VERR_NO_MEMORY;
+ }
+ pRoot->pParent = NULL;
+ pRoot->papChildren = NULL;
+ pRoot->pDesc = NULL;
+ pRoot->cDescsInTree = 0;
+ pRoot->cChildren = 0;
+ pRoot->iParent = UINT16_MAX;
+ pRoot->off = 0;
+ pRoot->cch = 0;
+ pRoot->szName[0] = '\0';
+
+ pUVM->stam.s.pRoot = pRoot;
+#endif
+
+
/*
* Register the ring-0 statistics (GVMM/GMM).
*/
@@ -308,14 +345,19 @@ VMMR3DECL(void) STAMR3TermUVM(PUVM pUVM)
/*
* Free used memory and the RWLock.
*/
- PSTAMDESC pCur = pUVM->stam.s.pHead;
- while (pCur)
+ PSTAMDESC pCur, pNext;
+ RTListForEachSafe(&pUVM->stam.s.List, pCur, pNext, STAMDESC, ListEntry)
{
- void *pvFree = pCur;
- pCur = pCur->pNext;
- RTMemFree(pvFree);
+#ifdef STAM_WITH_LOOKUP_TREE
+ pCur->pLookup->pDesc = NULL;
+#endif
+ RTMemFree(pCur);
}
- pUVM->stam.s.pHead = NULL;
+
+#ifdef STAM_WITH_LOOKUP_TREE
+ stamR3LookupDestroyTree(pUVM->stam.s.pRoot);
+ pUVM->stam.s.pRoot = NULL;
+#endif
Assert(pUVM->stam.s.RWSem != NIL_RTSEMRW);
RTSemRWDestroy(pUVM->stam.s.RWSem);
@@ -348,6 +390,7 @@ VMMR3DECL(void) STAMR3TermUVM(PUVM pUVM)
VMMR3DECL(int) STAMR3RegisterU(PUVM pUVM, void *pvSample, STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc)
{
AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
return stamR3RegisterU(pUVM, pvSample, NULL, NULL, enmType, enmVisibility, pszName, enmUnit, pszDesc);
}
@@ -450,14 +493,11 @@ VMMR3DECL(int) STAMR3RegisterVU(PUVM pUVM, void *pvSample, STAMTYPE enmType, ST
{
AssertReturn(enmType != STAMTYPE_CALLBACK, VERR_INVALID_PARAMETER);
- char *pszFormattedName;
- RTStrAPrintfV(&pszFormattedName, pszName, args);
- if (!pszFormattedName)
- return VERR_NO_MEMORY;
+ char szFormattedName[STAM_MAX_NAME_LEN + 8];
+ size_t cch = RTStrPrintfV(szFormattedName, sizeof(szFormattedName), pszName, args);
+ AssertReturn(cch <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
- int rc = STAMR3RegisterU(pUVM, pvSample, enmType, enmVisibility, pszFormattedName, enmUnit, pszDesc);
- RTStrFree(pszFormattedName);
- return rc;
+ return STAMR3RegisterU(pUVM, pvSample, enmType, enmVisibility, szFormattedName, enmUnit, pszDesc);
}
@@ -577,6 +617,624 @@ static int stamR3SlashCompare(const char *psz1, const char *psz2)
#endif /* VBOX_STRICT */
+#ifdef STAM_WITH_LOOKUP_TREE
+
+/**
+ * Compares a lookup node with a name.
+ *
+ * @returns like strcmp and memcmp.
+ * @param pNode The lookup node.
+ * @param pchName The name, not necessarily terminated.
+ * @param cchName The length of the name.
+ */
+DECL_FORCE_INLINE(int) stamR3LookupCmp(PSTAMLOOKUP pNode, const char *pchName, uint32_t cchName)
+{
+    /* Compare the common prefix first. */
+    uint32_t cchComp = RT_MIN(pNode->cch, cchName);
+    int iDiff = memcmp(pNode->szName, pchName, cchComp);
+    /* Equal prefixes: the shorter name sorts first.  +/-2 keeps length-only
+       differences distinguishable from ordinary memcmp results. */
+    if (!iDiff && pNode->cch != cchName)
+        iDiff = pNode->cch > cchName ? 2 : -2;
+    return iDiff;
+}
+
+
+/**
+ * Creates a new lookup child node.
+ *
+ * @returns Pointer to the newly created lookup node.  NULL on allocation
+ * failure or when the child-count limit is reached.
+ * @param pParent The parent node.
+ * @param pchName The name (not necessarily terminated).
+ * @param cchName The length of the name.
+ * @param offName The offset of the node in a path.
+ * @param iChild Child index of a node that's before the one
+ * we're inserting (returned by
+ * stamR3LookupFindChild).
+ */
+static PSTAMLOOKUP stamR3LookupNewChild(PSTAMLOOKUP pParent, const char *pchName, uint32_t cchName, uint32_t offName,
+ uint32_t iChild)
+{
+    Assert(cchName <= UINT8_MAX);
+    Assert(offName <= UINT8_MAX);
+    Assert(iChild < UINT16_MAX);
+
+    /*
+     * Allocate a new entry.
+     */
+    PSTAMLOOKUP pNew = (PSTAMLOOKUP)RTMemAlloc(RT_OFFSETOF(STAMLOOKUP, szName[cchName + 1]));
+    if (!pNew)
+        return NULL;
+    pNew->pParent = pParent;
+    pNew->papChildren = NULL;
+    pNew->pDesc = NULL;
+    pNew->cDescsInTree = 0;
+    pNew->cChildren = 0;
+    pNew->cch = (uint16_t)cchName;
+    pNew->off = (uint16_t)offName;
+    memcpy(pNew->szName, pchName, cchName);
+    pNew->szName[cchName] = '\0';
+
+    /*
+     * Reallocate the array?  (Grows in powers of two, starting at 8.)
+     */
+    if (RT_IS_POWER_OF_TWO(pParent->cChildren))
+    {
+        uint32_t cNew = pParent->cChildren ? (uint32_t)pParent->cChildren * 2 : 8;
+        /* 0x8000 cap — presumably so child indexes always fit the 16-bit
+           iParent field (cf. the iChild < UINT16_MAX assertion); confirm. */
+        AssertReturnStmt(cNew <= 0x8000, RTMemFree(pNew), NULL);
+        void *pvNew = RTMemRealloc(pParent->papChildren, cNew * sizeof(pParent->papChildren[0]));
+        if (!pvNew)
+        {
+            RTMemFree(pNew);
+            return NULL;
+        }
+        pParent->papChildren = (PSTAMLOOKUP *)pvNew;
+    }
+
+    /*
+     * Find the exact insertion point using iChild as a very good clue from
+     * the find function.
+     */
+    if (!pParent->cChildren)
+        iChild = 0;
+    else
+    {
+        if (iChild >= pParent->cChildren)
+            iChild = pParent->cChildren - 1;
+        while ( iChild < pParent->cChildren
+               && stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName) < 0)
+            iChild++;
+    }
+
+    /*
+     * Insert it.
+     */
+    if (iChild < pParent->cChildren)
+    {
+        /* Do shift.  (Move entries up one slot, keeping iParent in sync.) */
+        uint32_t i = pParent->cChildren;
+        while (i > iChild)
+        {
+            PSTAMLOOKUP pNode = pParent->papChildren[i - 1];
+            pParent->papChildren[i] = pNode;
+            pNode->iParent = i;
+            i--;
+        }
+    }
+
+    pNew->iParent = iChild;
+    pParent->papChildren[iChild] = pNew;
+    pParent->cChildren++;
+
+    return pNew;
+}
+
+
+/**
+ * Looks up a child.
+ *
+ * @returns Pointer to child node if found, NULL if not.
+ * @param pParent The parent node.
+ * @param pchName The name (not necessarily terminated).
+ * @param cchName The length of the name.
+ * @param piChild Where to store a child index suitable for
+ * passing to stamR3LookupNewChild when NULL is
+ * returned.
+ */
+static PSTAMLOOKUP stamR3LookupFindChild(PSTAMLOOKUP pParent, const char *pchName, uint32_t cchName, uint32_t *piChild)
+{
+    uint32_t iChild = pParent->cChildren;
+    if (iChild > 4)
+    {
+        /* Binary search — the child array is kept sorted by name. */
+        uint32_t iFirst = 0;
+        uint32_t iEnd = iChild;
+        iChild /= 2;
+        for (;;)
+        {
+            int iDiff = stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName);
+            if (!iDiff)
+            {
+                if (piChild)
+                    *piChild = iChild;
+                return pParent->papChildren[iChild];
+            }
+
+            /* Split. */
+            if (iDiff < 0)
+            {
+                iFirst = iChild + 1;
+                if (iFirst >= iEnd)
+                {
+                    if (piChild)
+                        *piChild = iChild;
+                    break;
+                }
+            }
+            else
+            {
+                if (iChild == iFirst)
+                {
+                    if (piChild)
+                        *piChild = iChild ? iChild - 1 : 0;
+                    break;
+                }
+                iEnd = iChild;
+            }
+
+            /* Calc next child. */
+            iChild = (iEnd - iFirst) / 2 + iFirst;
+        }
+        return NULL;
+    }
+
+    /*
+     * Linear search (backwards) for small child counts.
+     */
+    while (iChild-- > 0)
+    {
+        int iDiff = stamR3LookupCmp(pParent->papChildren[iChild], pchName, cchName);
+        if (iDiff <= 0)
+        {
+            if (piChild)
+                *piChild = iChild;
+            return !iDiff ? pParent->papChildren[iChild] : NULL;
+        }
+    }
+    if (piChild)
+        *piChild = 0;
+    return NULL;
+}
+
+
+/**
+ * Find the next sample descriptor node.
+ *
+ * This is for use with insertion in the big list and pattern range lookups.
+ *
+ * @returns Pointer to the next sample descriptor. NULL if not found (i.e.
+ * we're at the end of the list).
+ * @param pLookup The current node.
+ */
+static PSTAMDESC stamR3LookupFindNextWithDesc(PSTAMLOOKUP pLookup)
+{
+    Assert(!pLookup->pDesc);
+    PSTAMLOOKUP pCur = pLookup;
+    uint32_t iCur = 0;
+    for (;;)
+    {
+        /*
+         * Check all children.
+         */
+        uint32_t cChildren = pCur->cChildren;
+        if (iCur < cChildren)
+        {
+            PSTAMLOOKUP *papChildren = pCur->papChildren;
+            do
+            {
+                PSTAMLOOKUP pChild = papChildren[iCur]; /* use the cached array (previously fetched but unused) */
+                if (pChild->pDesc)
+                    return pChild->pDesc;
+
+                if (pChild->cChildren > 0)
+                {
+                    /* One level down. */
+                    iCur = 0;
+                    pCur = pChild;
+                    break;
+                }
+            } while (++iCur < cChildren);
+        }
+        else
+        {
+            /*
+             * One level up, resuming after the current.
+             */
+            iCur = pCur->iParent + 1;
+            pCur = pCur->pParent;
+            if (!pCur)
+                return NULL;
+        }
+    }
+}
+
+
+/**
+ * Look up a sample descriptor by name.
+ *
+ * @returns Pointer to a sample descriptor, NULL if not found.
+ * @param pRoot The root node.
+ * @param pszName The name to lookup.
+ */
+static PSTAMDESC stamR3LookupFindDesc(PSTAMLOOKUP pRoot, const char *pszName)
+{
+    Assert(!pRoot->pParent);
+    /* The loop condition both validates and consumes the leading '/' of each
+       path component (on advance pszName is left pointing at the next '/'). */
+    while (*pszName++ == '/')
+    {
+        const char *pszEnd = strchr(pszName, '/');
+        uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszName) : (uint32_t)strlen(pszName);
+        PSTAMLOOKUP pChild = stamR3LookupFindChild(pRoot, pszName, cch, NULL);
+        if (!pChild)
+            break;
+        if (!pszEnd)
+            return pChild->pDesc; /* last component; may be NULL for an interior-only node. */
+        pszName = pszEnd;
+        pRoot = pChild;
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Finds the first sample descriptor for a given lookup range.
+ *
+ * This is for pattern range lookups.
+ *
+ * @returns Pointer to the first descriptor, NULL if none in the range.
+ * @param pFirst The first node in the range.
+ * @param pLast The last node in the range.
+ */
+static PSTAMDESC stamR3LookupFindFirstDescForRange(PSTAMLOOKUP pFirst, PSTAMLOOKUP pLast)
+{
+    if (pFirst->pDesc)
+        return pFirst->pDesc;
+
+    PSTAMLOOKUP pCur = pFirst;
+    uint32_t iCur = 0;
+    for (;;)
+    {
+        uint32_t cChildren = pCur->cChildren;
+        if (iCur < cChildren)   /* use the cached count (previously re-read pCur->cChildren) */
+        {
+            /*
+             * Check all children.
+             */
+            PSTAMLOOKUP *papChildren = pCur->papChildren;
+            do
+            {
+                PSTAMLOOKUP pChild = papChildren[iCur]; /* use the cached array (previously fetched but unused) */
+                if (pChild->pDesc)
+                    return pChild->pDesc;
+                if (pChild->cChildren > 0)
+                {
+                    /* One level down. */
+                    iCur = 0;
+                    pCur = pChild;
+                    break;
+                }
+                if (pChild == pLast)
+                    return NULL;
+            } while (++iCur < cChildren);
+        }
+        else
+        {
+            /*
+             * One level up, checking current and its 'older' siblings.
+             */
+            if (pCur == pLast)
+                return NULL;
+            iCur = pCur->iParent + 1;
+            pCur = pCur->pParent;
+            if (!pCur)
+                break;
+        }
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Finds the last sample descriptor for a given lookup range.
+ *
+ * This is for pattern range lookups.
+ *
+ * @returns Pointer to the last descriptor, NULL if none in the range.
+ * @param pFirst The first node in the range.
+ * @param pLast The last node in the range.
+ */
+static PSTAMDESC stamR3LookupFindLastDescForRange(PSTAMLOOKUP pFirst, PSTAMLOOKUP pLast)
+{
+    PSTAMLOOKUP pCur = pLast;
+    uint32_t iCur = pCur->cChildren - 1;
+    for (;;)
+    {
+        if (iCur < pCur->cChildren)
+        {
+            /*
+             * Check children backwards, depth first.
+             */
+            PSTAMLOOKUP *papChildren = pCur->papChildren;
+            do
+            {
+                PSTAMLOOKUP pChild = pCur->papChildren[iCur];
+                if (pChild->cChildren > 0)
+                {
+                    /* One level down. */
+                    iCur = pChild->cChildren - 1;
+                    pCur = pChild;
+                    break;
+                }
+
+                if (pChild->pDesc)
+                    return pChild->pDesc;
+                if (pChild == pFirst)
+                    return NULL;
+            } while (iCur-- > 0); /* (underflow handled above) */
+        }
+        else
+        {
+            /*
+             * One level up, checking current and its 'older' siblings.
+             */
+            if (pCur->pDesc)
+                return pCur->pDesc;
+            if (pCur == pFirst)
+                return NULL;
+            iCur = pCur->iParent - 1; /* (underflow handled above) */
+            pCur = pCur->pParent;
+            if (!pCur)
+                break;
+        }
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Look up the first and last descriptors for a (single) pattern expression.
+ *
+ * This is used to optimize pattern enumerations and doesn't have to return 100%
+ * accurate results if that costs too much.
+ *
+ * @returns Pointer to the first descriptor in the range.
+ * @param pRoot The root node.
+ * @param pList The descriptor list anchor.
+ * @param pszPat The name pattern to lookup.
+ * @param ppLastDesc Where to store the address of the last
+ * descriptor (approximate).
+ */
+static PSTAMDESC stamR3LookupFindPatternDescRange(PSTAMLOOKUP pRoot, PRTLISTANCHOR pList, const char *pszPat,
+ PSTAMDESC *ppLastDesc)
+{
+    Assert(!pRoot->pParent);
+
+    /*
+     * If there is an early enough wildcard, the whole list needs to be searched.
+     * (The pszPat[0] check guards the pszPat[1] read so an empty pattern never
+     * indexes past the terminator.)
+     */
+    if (   pszPat[0] == '*' || pszPat[0] == '?'
+        || (pszPat[0] && (pszPat[1] == '*' || pszPat[1] == '?')))
+    {
+        *ppLastDesc = RTListGetLast(pList, STAMDESC, ListEntry);
+        return RTListGetFirst(pList, STAMDESC, ListEntry);
+    }
+
+    /*
+     * All statistics names start with a slash.
+     */
+    while ( *pszPat++ == '/'
+        && pRoot->cDescsInTree > 0
+        && pRoot->cChildren > 0)
+    {
+        const char *pszEnd = strchr(pszPat, '/');
+        uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszPat) : (uint32_t)strlen(pszPat);
+        if (!cch)
+            break;
+
+        const char *pszPat1 = (const char *)memchr(pszPat, '*', cch);
+        const char *pszPat2 = (const char *)memchr(pszPat, '?', cch);
+        if (pszPat1 || pszPat2)
+        {
+            /* We've narrowed it down to a sub-tree now. */
+            PSTAMLOOKUP pFirst = pRoot->papChildren[0];
+            PSTAMLOOKUP pLast = pRoot->papChildren[pRoot->cChildren - 1];
+            /** @todo narrow the range further if both pszPat1/2 != pszPat. */
+
+            *ppLastDesc = stamR3LookupFindLastDescForRange(pFirst, pLast);
+            return stamR3LookupFindFirstDescForRange(pFirst, pLast);
+        }
+
+        PSTAMLOOKUP pChild = stamR3LookupFindChild(pRoot, pszPat, cch, NULL);
+        if (!pChild)
+            break;
+
+        /* Advance */
+        if (!pszEnd)
+            return *ppLastDesc = pChild->pDesc;
+        pszPat = pszEnd;
+        pRoot = pChild;
+    }
+
+    /* No match. */
+    *ppLastDesc = NULL;
+    return NULL;
+}
+
+
+/**
+ * Increments the cDescsInTree member of the given node and all its ancestors.
+ *
+ * @param pLookup The lookup node (must have a descriptor attached).
+ */
+static void stamR3LookupIncUsage(PSTAMLOOKUP pLookup)
+{
+    Assert(pLookup->pDesc);
+
+    PSTAMLOOKUP pCur = pLookup;
+    while (pCur != NULL)
+    {
+        pCur->cDescsInTree++;
+        pCur = pCur->pParent;
+    }
+}
+
+
+/**
+ * Decrements the cDescsInTree member of the given node and all its ancestors.
+ *
+ * @param pLookup The lookup node (its descriptor must already be detached).
+ */
+static void stamR3LookupDecUsage(PSTAMLOOKUP pLookup)
+{
+    Assert(!pLookup->pDesc);
+
+    PSTAMLOOKUP pCur = pLookup;
+    while (pCur != NULL)
+    {
+        Assert(pCur->cDescsInTree > 0);
+        pCur->cDescsInTree--;
+        pCur = pCur->pParent;
+    }
+}
+
+
+/**
+ * Frees empty lookup nodes if it's worth it.
+ *
+ * @param pLookup The lookup node (its pDesc must already be NULL).
+ */
+static void stamR3LookupMaybeFree(PSTAMLOOKUP pLookup)
+{
+    Assert(!pLookup->pDesc);
+
+    /*
+     * Free between two and three levels of nodes. Freeing too much most
+     * likely wasted effort since we're either going to repopulate the tree
+     * or quit the whole thing.
+     */
+    if (pLookup->cDescsInTree > 0)
+        return;
+
+    PSTAMLOOKUP pCur = pLookup->pParent;
+    if (!pCur)
+        return;
+    if (pCur->cDescsInTree > 0)
+        return;
+    PSTAMLOOKUP pParent = pCur->pParent;
+    if (!pParent)   /* was 'if (pParent)': it bailed out whenever a grandparent
+                       existed and dereferenced NULL below when it didn't. */
+        return;
+
+    /* Step up one extra level when the grandparent subtree is empty too,
+       but never detach the root. */
+    if (pParent->cDescsInTree == 0 && pParent->pParent)
+    {
+        pCur = pParent;
+        pParent = pCur->pParent;
+    }
+
+    /*
+     * Remove pCur from pParent.
+     */
+    PSTAMLOOKUP *papChildren = pParent->papChildren;
+    uint32_t cChildren = --pParent->cChildren;
+    for (uint32_t i = pCur->iParent; i < cChildren; i++)
+    {
+        PSTAMLOOKUP pChild = papChildren[i + 1];
+        pChild->iParent = i;
+        papChildren[i] = pChild;
+    }
+    pCur->pParent = NULL;
+
+    /*
+     * Destroy pCur.
+     */
+    stamR3LookupDestroyTree(pCur);
+}
+
+
+/**
+ * Destroys a lookup tree.
+ *
+ * This is used by STAMR3Term as well as stamR3LookupMaybeFree.
+ *
+ * @param pRoot The root of the tree (must have no parent).
+ */
+static void stamR3LookupDestroyTree(PSTAMLOOKUP pRoot)
+{
+    Assert(pRoot); Assert(!pRoot->pParent);
+    /* Iterative post-order destruction (no recursion), with a fast inner
+       loop that frees whole runs of leaf children in one go. */
+    PSTAMLOOKUP pCur = pRoot;
+    for (;;)
+    {
+        uint32_t i = pCur->cChildren;
+        if (i > 0)
+        {
+            /*
+             * Push child (with leaf optimization).
+             */
+            PSTAMLOOKUP pChild = pCur->papChildren[--i];
+            if (pChild->cChildren != 0)
+                pCur = pChild;
+            else
+            {
+                /* free leaves. */
+                for (;;)
+                {
+                    if (pChild->papChildren)
+                    {
+                        RTMemFree(pChild->papChildren);
+                        pChild->papChildren = NULL;
+                    }
+                    RTMemFree(pChild);
+                    pCur->papChildren[i] = NULL;
+
+                    /* next */
+                    if (i == 0)
+                    {
+                        pCur->cChildren = 0;
+                        break;
+                    }
+                    pChild = pCur->papChildren[--i];
+                    if (pChild->cChildren != 0)
+                    {
+                        pCur->cChildren = i + 1;
+                        pCur = pChild;
+                        break;
+                    }
+                }
+            }
+        }
+        else
+        {
+            /*
+             * Pop and free current.  (Children were all freed above, so the
+             * node must be the last remaining child of its parent.)
+             */
+            Assert(!pCur->pDesc);
+
+            PSTAMLOOKUP pParent = pCur->pParent;
+            Assert(pCur->iParent == (pParent ? pParent->cChildren - 1 : UINT16_MAX));
+
+            RTMemFree(pCur->papChildren);
+            pCur->papChildren = NULL;
+            RTMemFree(pCur);
+
+            pCur = pParent;
+            if (!pCur)
+                break;
+            pCur->papChildren[--pCur->cChildren] = NULL;
+        }
+    }
+}
+
+#endif /* STAM_WITH_LOOKUP_TREE */
+
+
+
/**
* Internal worker for the different register calls.
*
@@ -596,14 +1254,65 @@ static int stamR3SlashCompare(const char *psz1, const char *psz2)
static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfnReset, PFNSTAMR3CALLBACKPRINT pfnPrint,
STAMTYPE enmType, STAMVISIBILITY enmVisibility, const char *pszName, STAMUNIT enmUnit, const char *pszDesc)
{
+ AssertReturn(pszName[0] == '/', VERR_INVALID_NAME);
+ AssertReturn(pszName[1] != '/' && pszName[1], VERR_INVALID_NAME);
+ uint32_t const cchName = (uint32_t)strlen(pszName);
+ AssertReturn(cchName <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+ AssertReturn(pszName[cchName - 1] != '/', VERR_INVALID_NAME);
+ AssertReturn(memchr(pszName, '\\', cchName) == NULL, VERR_INVALID_NAME);
+
STAM_LOCK_WR(pUVM);
/*
- * Check if exists.
+ * Look up the tree location, populating the lookup tree as we walk it.
*/
- PSTAMDESC pPrev = NULL;
- PSTAMDESC pCur = pUVM->stam.s.pHead;
- while (pCur)
+#ifdef STAM_WITH_LOOKUP_TREE
+ PSTAMLOOKUP pLookup = pUVM->stam.s.pRoot; Assert(pLookup);
+ uint32_t offName = 1;
+ for (;;)
+ {
+ /* Get the next part of the path. */
+ const char *pszStart = &pszName[offName];
+ const char *pszEnd = strchr(pszStart, '/');
+ uint32_t cch = pszEnd ? (uint32_t)(pszEnd - pszStart) : cchName - offName;
+ if (cch == 0)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ AssertMsgFailed(("No double or trailing slashes are allowed: '%s'\n", pszName));
+ return VERR_INVALID_NAME;
+ }
+
+ /* Do the looking up. */
+ uint32_t iChild = 0;
+ PSTAMLOOKUP pChild = stamR3LookupFindChild(pLookup, pszStart, cch, &iChild);
+ if (!pChild)
+ {
+ pChild = stamR3LookupNewChild(pLookup, pszStart, cch, offName, iChild);
+ if (!pChild)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ return VERR_NO_MEMORY;
+ }
+ }
+
+ /* Advance. */
+ pLookup = pChild;
+ if (!pszEnd)
+ break;
+ offName += cch + 1;
+ }
+ if (pLookup->pDesc)
+ {
+ STAM_UNLOCK_WR(pUVM);
+ AssertMsgFailed(("Duplicate sample name: %s\n", pszName));
+ return VERR_ALREADY_EXISTS;
+ }
+
+ PSTAMDESC pCur = stamR3LookupFindNextWithDesc(pLookup);
+
+#else
+ PSTAMDESC pCur;
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
{
int iDiff = strcmp(pCur->pszName, pszName);
/* passed it */
@@ -616,24 +1325,24 @@ static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfn
AssertMsgFailed(("Duplicate sample name: %s\n", pszName));
return VERR_ALREADY_EXISTS;
}
-
- /* next */
- pPrev = pCur;
- pCur = pCur->pNext;
}
+#endif
/*
* Check that the name doesn't screw up sorting order when taking
* slashes into account. The QT4 GUI makes some assumptions.
* Problematic chars are: !"#$%&'()*+,-.
*/
+#ifdef VBOX_STRICT
Assert(pszName[0] == '/');
- if (pPrev)
- Assert(stamR3SlashCompare(pPrev->pszName, pszName) < 0);
- if (pCur)
- Assert(stamR3SlashCompare(pCur->pszName, pszName) > 0);
+ PSTAMDESC pPrev = pCur
+ ? RTListGetPrev(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ : RTListGetLast(&pUVM->stam.s.List, STAMDESC, ListEntry);
+ Assert(!pPrev || strcmp(pszName, pPrev->pszName) > 0);
+ Assert(!pCur || strcmp(pszName, pCur->pszName) < 0);
+ Assert(!pPrev || stamR3SlashCompare(pPrev->pszName, pszName) < 0);
+ Assert(!pCur || stamR3SlashCompare(pCur->pszName, pszName) > 0);
-#ifdef VBOX_STRICT
/*
* Check alignment requirements.
*/
@@ -688,12 +1397,11 @@ static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfn
* Create a new node and insert it at the current location.
*/
int rc;
- size_t cchName = strlen(pszName) + 1;
- size_t cchDesc = pszDesc ? strlen(pszDesc) + 1 : 0;
- PSTAMDESC pNew = (PSTAMDESC)RTMemAlloc(sizeof(*pNew) + cchName + cchDesc);
+ size_t cbDesc = pszDesc ? strlen(pszDesc) + 1 : 0;
+ PSTAMDESC pNew = (PSTAMDESC)RTMemAlloc(sizeof(*pNew) + cchName + 1 + cbDesc);
if (pNew)
{
- pNew->pszName = (char *)memcpy((char *)(pNew + 1), pszName, cchName);
+ pNew->pszName = (char *)memcpy((char *)(pNew + 1), pszName, cchName + 1);
pNew->enmType = enmType;
pNew->enmVisibility = enmVisibility;
if (enmType != STAMTYPE_CALLBACK)
@@ -707,13 +1415,18 @@ static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfn
pNew->enmUnit = enmUnit;
pNew->pszDesc = NULL;
if (pszDesc)
- pNew->pszDesc = (char *)memcpy((char *)(pNew + 1) + cchName, pszDesc, cchDesc);
+ pNew->pszDesc = (char *)memcpy((char *)(pNew + 1) + cchName + 1, pszDesc, cbDesc);
- pNew->pNext = pCur;
- if (pPrev)
- pPrev->pNext = pNew;
+ if (pCur)
+ RTListNodeInsertBefore(&pCur->ListEntry, &pNew->ListEntry);
else
- pUVM->stam.s.pHead = pNew;
+ RTListAppend(&pUVM->stam.s.List, &pNew->ListEntry);
+
+#ifdef STAM_WITH_LOOKUP_TREE
+ pNew->pLookup = pLookup;
+ pLookup->pDesc = pNew;
+ stamR3LookupIncUsage(pLookup);
+#endif
stamR3ResetOne(pNew, pUVM->pVM);
rc = VINF_SUCCESS;
@@ -727,7 +1440,29 @@ static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfn
/**
- * Deregisters a sample previously registered by STAR3Register().
+ * Destroys the statistics descriptor, unlinking it and freeing all resources.
+ *
+ * @returns VINF_SUCCESS
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pCur The descriptor to destroy.
+ */
+static int stamR3DestroyDesc(PUVM pUVM, PSTAMDESC pCur)
+{
+    /* Callers (STAMR3DeregisterByAddr, stamR3DeregisterByPattern) hold the
+       STAM write lock around this call. */
+    RTListNodeRemove(&pCur->ListEntry);
+#ifdef STAM_WITH_LOOKUP_TREE
+    pCur->pLookup->pDesc = NULL; /** @todo free lookup nodes once it's working. */
+    stamR3LookupDecUsage(pCur->pLookup);
+    stamR3LookupMaybeFree(pCur->pLookup);
+#endif
+    RTMemFree(pCur);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deregisters a sample previously registered by STAR3Register() given its
+ * address.
*
* This is intended used for devices which can be unplugged and for
* temporary samples.
@@ -736,36 +1471,69 @@ static int stamR3RegisterU(PUVM pUVM, void *pvSample, PFNSTAMR3CALLBACKRESET pfn
* @param pUVM Pointer to the user mode VM structure.
* @param pvSample Pointer to the sample registered with STAMR3Register().
*/
-VMMR3DECL(int) STAMR3DeregisterU(PUVM pUVM, void *pvSample)
+VMMR3DECL(int) STAMR3DeregisterByAddr(PUVM pUVM, void *pvSample)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+ /* This is a complete waste of time when shutting down. */
+ VMSTATE enmState = VMR3GetStateU(pUVM);
+ if (enmState >= VMSTATE_DESTROYING)
+ return VINF_SUCCESS;
+
STAM_LOCK_WR(pUVM);
/*
* Search for it.
*/
int rc = VERR_INVALID_HANDLE;
- PSTAMDESC pPrev = NULL;
- PSTAMDESC pCur = pUVM->stam.s.pHead;
- while (pCur)
+ PSTAMDESC pCur, pNext;
+ RTListForEachSafe(&pUVM->stam.s.List, pCur, pNext, STAMDESC, ListEntry)
{
if (pCur->u.pv == pvSample)
+ rc = stamR3DestroyDesc(pUVM, pCur);
+ }
+
+ STAM_UNLOCK_WR(pUVM);
+ return rc;
+}
+
+
+/**
+ * Worker for STAMR3Deregister, STAMR3DeregisterV and STAMR3DeregisterF.
+ *
+ * @returns VBox status code.
+ * @retval VWRN_NOT_FOUND if no matching names found.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name pattern.
+ */
+static int stamR3DeregisterByPattern(PUVM pUVM, const char *pszPat)
+{
+ Assert(!strchr(pszPat, '|')); /* single pattern! */
+
+ int rc = VWRN_NOT_FOUND;
+ STAM_LOCK_WR(pUVM);
+
+ PSTAMDESC pLast;
+ PSTAMDESC pCur = stamR3LookupFindPatternDescRange(pUVM->stam.s.pRoot, &pUVM->stam.s.List, pszPat, &pLast);
+ if (pCur)
+ {
+ for (;;)
{
- void *pvFree = pCur;
- pCur = pCur->pNext;
- if (pPrev)
- pPrev->pNext = pCur;
- else
- pUVM->stam.s.pHead = pCur;
+ PSTAMDESC pNext = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
- RTMemFree(pvFree);
- rc = VINF_SUCCESS;
- continue;
- }
+ if (RTStrSimplePatternMatch(pszPat, pCur->pszName))
+ rc = stamR3DestroyDesc(pUVM, pCur);
- /* next */
- pPrev = pCur;
- pCur = pCur->pNext;
+ /* advance. */
+ if (pCur == pLast)
+ break;
+ pCur = pNext;
+ }
+ Assert(pLast);
}
+ else
+ Assert(!pLast);
STAM_UNLOCK_WR(pUVM);
return rc;
@@ -773,18 +1541,71 @@ VMMR3DECL(int) STAMR3DeregisterU(PUVM pUVM, void *pvSample)
/**
- * Deregisters a sample previously registered by STAR3Register().
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
*
- * This is intended used for devices which can be unplugged and for
- * temporary samples.
+ * @returns VBox status.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPat The name pattern.
+ * @sa STAMR3DeregisterF, STAMR3DeregisterV
+ */
+VMMR3DECL(int) STAMR3Deregister(PUVM pUVM, const char *pszPat)
+{
+    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+    /* This is a complete waste of time when shutting down. */
+    VMSTATE enmState = VMR3GetStateU(pUVM);
+    if (enmState >= VMSTATE_DESTROYING)
+        return VINF_SUCCESS;
+
+    /* Single pattern only — no '|' alternatives (asserted by the worker). */
+    return stamR3DeregisterByPattern(pUVM, pszPat);
+}
+
+
+/**
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
*
* @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pvSample Pointer to the sample registered with STAMR3Register().
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPatFmt The name pattern format string.
+ * @param ... Format string arguments.
+ * @sa STAMR3Deregister, STAMR3DeregisterV
*/
-VMMR3DECL(int) STAMR3Deregister(PVM pVM, void *pvSample)
+VMMR3DECL(int) STAMR3DeregisterF(PUVM pUVM, const char *pszPatFmt, ...)
{
- return STAMR3DeregisterU(pVM->pUVM, pvSample);
+ va_list va;
+ va_start(va, pszPatFmt);
+ int rc = STAMR3DeregisterV(pUVM, pszPatFmt, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Deregister zero or more samples given a (single) pattern matching their
+ * names.
+ *
+ * @returns VBox status.
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param pszPatFmt The name pattern format string.
+ * @param va Format string arguments.
+ * @sa STAMR3Deregister, STAMR3DeregisterF
+ */
+VMMR3DECL(int) STAMR3DeregisterV(PUVM pUVM, const char *pszPatFmt, va_list va)
+{
+    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+
+    /* This is a complete waste of time when shutting down. */
+    VMSTATE enmState = VMR3GetStateU(pUVM);
+    if (enmState >= VMSTATE_DESTROYING)
+        return VINF_SUCCESS;
+
+    /* The +8 slack lets us detect formatted patterns exceeding
+       STAM_MAX_NAME_LEN instead of silently truncating them. */
+    char szPat[STAM_MAX_NAME_LEN + 8];
+    size_t cchPat = RTStrPrintfV(szPat, sizeof(szPat), pszPatFmt, va);
+    AssertReturn(cchPat <= STAM_MAX_NAME_LEN, VERR_OUT_OF_RANGE);
+
+    return stamR3DeregisterByPattern(pUVM, szPat);
+}
@@ -793,13 +1614,16 @@ VMMR3DECL(int) STAMR3Deregister(PVM pVM, void *pvSample)
* It's possible to select a subset of the samples.
*
* @returns VBox status. (Basically, it cannot fail.)
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
* If NULL all samples are reset.
* @remarks Don't confuse this with the other 'XYZR3Reset' methods, it's not called at VM reset.
*/
-VMMR3DECL(int) STAMR3ResetU(PUVM pUVM, const char *pszPat)
+VMMR3DECL(int) STAMR3Reset(PUVM pUVM, const char *pszPat)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
int rc = VINF_SUCCESS;
/* ring-0 */
@@ -875,22 +1699,6 @@ VMMR3DECL(int) STAMR3ResetU(PUVM pUVM, const char *pszPat)
/**
- * Resets statistics for the specified VM.
- * It's possible to select a subset of the samples.
- *
- * @returns VBox status. (Basically, it cannot fail.)
- * @param pVM Pointer to the VM.
- * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
- * If NULL all samples are reset.
- * @remarks Don't confuse this with the other 'XYZR3Reset' methods, it's not called at VM reset.
- */
-VMMR3DECL(int) STAMR3Reset(PVM pVM, const char *pszPat)
-{
- return STAMR3ResetU(pVM->pUVM, pszPat);
-}
-
-
-/**
* Resets one statistics sample.
* Callback for stamR3EnumU().
*
@@ -975,7 +1783,7 @@ static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg)
* It's possible to select a subset of the samples.
*
* @returns VBox status. (Basically, it cannot fail.)
- * @param pUVM Pointer to the user mode VM structure.
+ * @param pUVM The user mode VM handle.
* @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
* If NULL all samples are reset.
* @param fWithDesc Whether to include the descriptions.
@@ -985,8 +1793,11 @@ static int stamR3ResetOne(PSTAMDESC pDesc, void *pvArg)
* The returned pointer must be freed by calling STAMR3SnapshotFree().
* @param pcchSnapshot Where to store the size of the snapshot data. (Excluding the trailing '\0')
*/
-VMMR3DECL(int) STAMR3SnapshotU(PUVM pUVM, const char *pszPat, char **ppszSnapshot, size_t *pcchSnapshot, bool fWithDesc)
+VMMR3DECL(int) STAMR3Snapshot(PUVM pUVM, const char *pszPat, char **ppszSnapshot, size_t *pcchSnapshot, bool fWithDesc)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
STAMR3SNAPSHOTONE State = { NULL, NULL, NULL, pUVM->pVM, 0, VINF_SUCCESS, fWithDesc };
/*
@@ -1022,28 +1833,6 @@ VMMR3DECL(int) STAMR3SnapshotU(PUVM pUVM, const char *pszPat, char **ppszSnapsho
/**
- * Get a snapshot of the statistics.
- * It's possible to select a subset of the samples.
- *
- * @returns VBox status. (Basically, it cannot fail.)
- * @param pVM Pointer to the VM.
- * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
- * If NULL all samples are reset.
- * @param fWithDesc Whether to include the descriptions.
- * @param ppszSnapshot Where to store the pointer to the snapshot data.
- * The format of the snapshot should be XML, but that will have to be discussed
- * when this function is implemented.
- * The returned pointer must be freed by calling STAMR3SnapshotFree().
- * @param pcchSnapshot Where to store the size of the snapshot data.
- * (Excluding the trailing '\\0')
- */
-VMMR3DECL(int) STAMR3Snapshot(PVM pVM, const char *pszPat, char **ppszSnapshot, size_t *pcchSnapshot, bool fWithDesc)
-{
- return STAMR3SnapshotU(pVM->pUVM, pszPat, ppszSnapshot, pcchSnapshot, fWithDesc);
-}
-
-
-/**
* stamR3EnumU callback employed by STAMR3Snapshot.
*
* @returns VBox status code, but it's interpreted as 0 == success / !0 == failure by enmR3Enum.
@@ -1282,11 +2071,11 @@ static int stamR3SnapshotPrintf(PSTAMR3SNAPSHOTONE pThis, const char *pszFormat,
* Releases a statistics snapshot returned by STAMR3Snapshot().
*
* @returns VBox status.
- * @param pUVM Pointer to the user mode VM structure.
+ * @param pUVM The user mode VM handle.
* @param pszSnapshot The snapshot data pointer returned by STAMR3Snapshot().
* NULL is allowed.
*/
-VMMR3DECL(int) STAMR3SnapshotFreeU(PUVM pUVM, char *pszSnapshot)
+VMMR3DECL(int) STAMR3SnapshotFree(PUVM pUVM, char *pszSnapshot)
{
    /* Free only when a snapshot was actually returned; the inverted check
       made this a no-op and leaked every snapshot buffer. */
    if (pszSnapshot)
        RTMemFree(pszSnapshot);
@@ -1296,20 +2085,6 @@ VMMR3DECL(int) STAMR3SnapshotFreeU(PUVM pUVM, char *pszSnapshot)
/**
- * Releases a statistics snapshot returned by STAMR3Snapshot().
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszSnapshot The snapshot data pointer returned by STAMR3Snapshot().
- * NULL is allowed.
- */
-VMMR3DECL(int) STAMR3SnapshotFree(PVM pVM, char *pszSnapshot)
-{
- return STAMR3SnapshotFreeU(pVM->pUVM, pszSnapshot);
-}
-
-
-/**
* Dumps the selected statistics to the log.
*
* @returns VBox status.
@@ -1317,10 +2092,13 @@ VMMR3DECL(int) STAMR3SnapshotFree(PVM pVM, char *pszSnapshot)
* @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
* If NULL all samples are written to the log.
*/
-VMMR3DECL(int) STAMR3DumpU(PUVM pUVM, const char *pszPat)
+VMMR3DECL(int) STAMR3Dump(PUVM pUVM, const char *pszPat)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
STAMR3PRINTONEARGS Args;
- Args.pVM = pUVM->pVM;
+ Args.pUVM = pUVM;
Args.pvArg = NULL;
Args.pfnPrintf = stamR3EnumLogPrintf;
@@ -1330,20 +2108,6 @@ VMMR3DECL(int) STAMR3DumpU(PUVM pUVM, const char *pszPat)
/**
- * Dumps the selected statistics to the log.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
- * If NULL all samples are written to the log.
- */
-VMMR3DECL(int) STAMR3Dump(PVM pVM, const char *pszPat)
-{
- return STAMR3DumpU(pVM->pUVM, pszPat);
-}
-
-
-/**
* Prints to the log.
*
* @param pArgs Pointer to the print one argument structure.
@@ -1368,10 +2132,13 @@ static DECLCALLBACK(void) stamR3EnumLogPrintf(PSTAMR3PRINTONEARGS pArgs, const c
* @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
* If NULL all samples are written to the log.
*/
-VMMR3DECL(int) STAMR3DumpToReleaseLogU(PUVM pUVM, const char *pszPat)
+VMMR3DECL(int) STAMR3DumpToReleaseLog(PUVM pUVM, const char *pszPat)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
STAMR3PRINTONEARGS Args;
- Args.pVM = pUVM->pVM;
+ Args.pUVM = pUVM;
Args.pvArg = NULL;
Args.pfnPrintf = stamR3EnumRelLogPrintf;
@@ -1379,21 +2146,6 @@ VMMR3DECL(int) STAMR3DumpToReleaseLogU(PUVM pUVM, const char *pszPat)
return VINF_SUCCESS;
}
-
-/**
- * Dumps the selected statistics to the release log.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
- * If NULL all samples are written to the log.
- */
-VMMR3DECL(int) STAMR3DumpToReleaseLog(PVM pVM, const char *pszPat)
-{
- return STAMR3DumpToReleaseLogU(pVM->pUVM, pszPat);
-}
-
-
/**
* Prints to the release log.
*
@@ -1415,14 +2167,17 @@ static DECLCALLBACK(void) stamR3EnumRelLogPrintf(PSTAMR3PRINTONEARGS pArgs, cons
* Prints the selected statistics to standard out.
*
* @returns VBox status.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
* If NULL all samples are reset.
*/
-VMMR3DECL(int) STAMR3PrintU(PUVM pUVM, const char *pszPat)
+VMMR3DECL(int) STAMR3Print(PUVM pUVM, const char *pszPat)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
STAMR3PRINTONEARGS Args;
- Args.pVM = pUVM->pVM;
+ Args.pUVM = pUVM;
Args.pvArg = NULL;
Args.pfnPrintf = stamR3EnumPrintf;
@@ -1432,20 +2187,6 @@ VMMR3DECL(int) STAMR3PrintU(PUVM pUVM, const char *pszPat)
/**
- * Prints the selected statistics to standard out.
- *
- * @returns VBox status.
- * @param pVM Pointer to the VM.
- * @param pszPat The name matching pattern. See somewhere_where_this_is_described_in_detail.
- * If NULL all samples are reset.
- */
-VMMR3DECL(int) STAMR3Print(PVM pVM, const char *pszPat)
-{
- return STAMR3PrintU(pVM->pUVM, pszPat);
-}
-
-
-/**
* Prints to stdout.
*
* @param pArgs Pointer to the print one argument structure.
@@ -1507,7 +2248,7 @@ static int stamR3PrintOne(PSTAMDESC pDesc, void *pvArg)
case STAMTYPE_CALLBACK:
{
char szBuf[512];
- pDesc->u.Callback.pfnPrint(pArgs->pVM, pDesc->u.Callback.pvSample, szBuf, sizeof(szBuf));
+ pDesc->u.Callback.pfnPrint(pArgs->pUVM->pVM, pDesc->u.Callback.pvSample, szBuf, sizeof(szBuf));
pArgs->pfnPrintf(pArgs, "%-32s %s %s\n", pDesc->pszName, szBuf, STAMR3GetUnit(pDesc->enmUnit));
break;
}
@@ -1589,13 +2330,16 @@ static int stamR3PrintOne(PSTAMDESC pDesc, void *pvArg)
*
* @returns Whatever the callback returns.
*
- * @param pUVM Pointer to the user mode VM structure.
+ * @param pUVM The user mode VM handle.
* @param pszPat The pattern to match samples.
* @param pfnEnum The callback function.
* @param pvUser The pvUser argument of the callback function.
*/
-VMMR3DECL(int) STAMR3EnumU(PUVM pUVM, const char *pszPat, PFNSTAMR3ENUM pfnEnum, void *pvUser)
+VMMR3DECL(int) STAMR3Enum(PUVM pUVM, const char *pszPat, PFNSTAMR3ENUM pfnEnum, void *pvUser)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+
STAMR3ENUMONEARGS Args;
Args.pVM = pUVM->pVM;
Args.pfnEnum = pfnEnum;
@@ -1606,22 +2350,6 @@ VMMR3DECL(int) STAMR3EnumU(PUVM pUVM, const char *pszPat, PFNSTAMR3ENUM pfnEnum,
/**
- * Enumerate the statistics by the means of a callback function.
- *
- * @returns Whatever the callback returns.
- *
- * @param pVM Pointer to the VM.
- * @param pszPat The pattern to match samples.
- * @param pfnEnum The callback function.
- * @param pvUser The pvUser argument of the callback function.
- */
-VMMR3DECL(int) STAMR3Enum(PVM pVM, const char *pszPat, PFNSTAMR3ENUM pfnEnum, void *pvUser)
-{
- return STAMR3EnumU(pVM->pUVM, pszPat, pfnEnum, pvUser);
-}
-
-
-/**
* Callback function for STARTR3Enum().
*
* @returns whatever the callback returns.
@@ -1648,6 +2376,19 @@ static int stamR3EnumOne(PSTAMDESC pDesc, void *pvArg)
/**
+ * Checks if the string contains a pattern expression or not.
+ *
+ * @returns true / false.
+ * @param pszPat The potential pattern.
+ */
+static bool stamR3IsPattern(const char *pszPat)
+{
+ return strchr(pszPat, '*') != NULL
+ || strchr(pszPat, '?') != NULL;
+}
+
+
+/**
* Match a name against an array of patterns.
*
* @returns true if it matches, false if it doesn't match.
@@ -1745,9 +2486,10 @@ static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0,
int (*pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg)
{
int rc = VINF_SUCCESS;
+ PSTAMDESC pCur;
/*
- * All
+ * All.
*/
if (!pszPat || !*pszPat || !strcmp(pszPat, "*"))
{
@@ -1755,15 +2497,11 @@ static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0,
stamR3Ring0StatsUpdateU(pUVM, "*");
STAM_LOCK_RD(pUVM);
- PSTAMDESC pCur = pUVM->stam.s.pHead;
- while (pCur)
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
{
rc = pfnCallback(pCur, pvArg);
if (rc)
break;
-
- /* next */
- pCur = pCur->pNext;
}
STAM_UNLOCK_RD(pUVM);
}
@@ -1777,16 +2515,48 @@ static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0,
stamR3Ring0StatsUpdateU(pUVM, pszPat);
STAM_LOCK_RD(pUVM);
- /** @todo This needs to be optimized since the GUI is using this path for the VM info dialog.
- * Note that it's doing exact matching. Organizing the samples in a tree would speed up thing
- * no end (at least for debug and profile builds). */
- for (PSTAMDESC pCur = pUVM->stam.s.pHead; pCur; pCur = pCur->pNext)
+#ifdef STAM_WITH_LOOKUP_TREE
+ if (!stamR3IsPattern(pszPat))
+ {
+ pCur = stamR3LookupFindDesc(pUVM->stam.s.pRoot, pszPat);
+ if (pCur)
+ rc = pfnCallback(pCur, pvArg);
+ }
+ else
+ {
+ PSTAMDESC pLast;
+ pCur = stamR3LookupFindPatternDescRange(pUVM->stam.s.pRoot, &pUVM->stam.s.List, pszPat, &pLast);
+ if (pCur)
+ {
+ for (;;)
+ {
+ if (RTStrSimplePatternMatch(pszPat, pCur->pszName))
+ {
+ rc = pfnCallback(pCur, pvArg);
+ if (rc)
+ break;
+ }
+ if (pCur == pLast)
+ break;
+ pCur = RTListNodeGetNext(&pCur->ListEntry, STAMDESC, ListEntry);
+ }
+ Assert(pLast);
+ }
+ else
+ Assert(!pLast);
+
+ }
+#else
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ {
if (RTStrSimplePatternMatch(pszPat, pCur->pszName))
{
rc = pfnCallback(pCur, pvArg);
if (rc)
break;
}
+ }
+#endif
STAM_UNLOCK_RD(pUVM);
}
@@ -1812,13 +2582,15 @@ static int stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0,
STAM_LOCK_RD(pUVM);
unsigned iExpression = 0;
- for (PSTAMDESC pCur = pUVM->stam.s.pHead; pCur; pCur = pCur->pNext)
+ RTListForEach(&pUVM->stam.s.List, pCur, STAMDESC, ListEntry)
+ {
if (stamR3MultiMatch(papszExpressions, cExpressions, &iExpression, pCur->pszName))
{
rc = pfnCallback(pCur, pvArg);
if (rc)
break;
}
+ }
STAM_UNLOCK_RD(pUVM);
RTMemTmpFree(papszExpressions);
@@ -2003,30 +2775,22 @@ VMMR3DECL(const char *) STAMR3GetUnit(STAMUNIT enmUnit)
#ifdef VBOX_WITH_DEBUGGER
/**
- * The '.stats' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.stats' command.}
*/
-static DECLCALLBACK(int) stamR3CmdStats(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) stamR3CmdStats(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
- PUVM pUVM = pVM->pUVM;
- if (!pUVM->stam.s.pHead)
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ if (RTListIsEmpty(&pUVM->stam.s.List))
return DBGCCmdHlpFail(pCmdHlp, pCmd, "No statistics present");
/*
* Do the printing.
*/
STAMR3PRINTONEARGS Args;
- Args.pVM = pVM;
+ Args.pUVM = pUVM;
Args.pvArg = pCmdHlp;
Args.pfnPrintf = stamR3EnumDbgfPrintf;
@@ -2054,29 +2818,21 @@ static DECLCALLBACK(void) stamR3EnumDbgfPrintf(PSTAMR3PRINTONEARGS pArgs, const
/**
- * The '.statsreset' command.
- *
- * @returns VBox status.
- * @param pCmd Pointer to the command descriptor (as registered).
- * @param pCmdHlp Pointer to command helper functions.
- * @param pVM Pointer to the current VM (if any).
- * @param paArgs Pointer to (readonly) array of arguments.
- * @param cArgs Number of arguments in the array.
+ * @callback_method_impl{FNDBGCCMD, The '.statsreset' command.}
*/
-static DECLCALLBACK(int) stamR3CmdStatsReset(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) stamR3CmdStatsReset(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
- DBGC_CMDHLP_REQ_VM_RET(pCmdHlp, pCmd, pVM);
- PUVM pUVM = pVM->pUVM;
- if (!pUVM->stam.s.pHead)
+ DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
+ if (RTListIsEmpty(&pUVM->stam.s.List))
return DBGCCmdHlpFail(pCmdHlp, pCmd, "No statistics present");
/*
* Execute reset.
*/
- int rc = STAMR3ResetU(pUVM, cArgs ? paArgs[0].u.pszString : NULL);
+ int rc = STAMR3Reset(pUVM, cArgs ? paArgs[0].u.pszString : NULL);
if (RT_SUCCESS(rc))
return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "STAMR3ResetU");
return DBGCCmdHlpPrintf(pCmdHlp, "Statistics have been reset.\n");
diff --git a/src/VBox/VMM/VMMR3/TM.cpp b/src/VBox/VMM/VMMR3/TM.cpp
index 49390dd7..4391ea4d 100644
--- a/src/VBox/VMM/VMMR3/TM.cpp
+++ b/src/VBox/VMM/VMMR3/TM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -124,6 +124,7 @@
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP from sup.h */
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
@@ -134,6 +135,7 @@
#include <VBox/vmm/iom.h>
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
@@ -170,7 +172,7 @@ static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion
static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
static void tmR3TimerQueueRunVirtualSync(PVM pVM);
-static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent);
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent);
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
static DECLCALLBACK(void) tmR3CpuLoadTimer(PVM pVM, PTMTIMER pTimer, void *pvUser);
#endif
@@ -344,6 +346,13 @@ VMM_INT_DECL(int) TMR3Init(PVM pVM)
pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC(pVM);
else
pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
+ /** @todo needs a better fix, for now disable offsetted mode for VMs
+ * with more than one VCPU. With the current TSC handling (frequent
+ * switching between offsetted mode and taking VM exits, on all VCPUs
+ * without any kind of coordination) it will lead to inconsistent TSC
+ * behavior with guest SMP, including TSC going backwards. */
+ if (pVM->cCpus != 1)
+ pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
}
/** @cfgm{TM/TSCTicksPerSecond, uint32_t, Current TSC frequency from GIP}
@@ -445,7 +454,7 @@ VMM_INT_DECL(int) TMR3Init(PVM pVM)
/** @cfgm{TM/CatchUpPrecentage[0..9], uint32_t, %, 1, 2000, various}
* The catch-up percent for a given period. */
- /** @cfgm{TM/CatchUpStartThreshold[0..9], uint64_t, ns, 0, UINT64_MAX,
+ /** @cfgm{TM/CatchUpStartThreshold[0..9], uint64_t, ns, 0, UINT64_MAX}
* The catch-up period threshold, or if you like, when a period starts. */
#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
do \
@@ -930,21 +939,24 @@ VMM_INT_DECL(int) TMR3InitFinalize(PVM pVM)
/*
* Resolve symbols.
*/
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
- AssertRCReturn(rc, rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
- AssertRCReturn(rc, rc);
- if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else
- AssertFatalFailed();
- AssertRCReturn(rc, rc);
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
+ AssertRCReturn(rc, rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
+ AssertRCReturn(rc, rc);
+ if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else
+ AssertFatalFailed();
+ AssertRCReturn(rc, rc);
+ }
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataR0.pfnBad);
AssertRCReturn(rc, rc);
@@ -990,28 +1002,31 @@ VMM_INT_DECL(void) TMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
LogFlow(("TMR3Relocate\n"));
NOREF(offDelta);
- pVM->tm.s.pvGIPRC = MMHyperR3ToRC(pVM, pVM->tm.s.pvGIPR3);
- pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pVM->tm.s.paTimerQueuesR3);
pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pVM->tm.s.paTimerQueuesR3);
- pVM->tm.s.VirtualGetRawDataRC.pu64Prev = MMHyperR3ToRC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
- AssertFatal(pVM->tm.s.VirtualGetRawDataRC.pu64Prev);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
- AssertFatalRC(rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
- AssertFatalRC(rc);
-
- if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else
- AssertFatalFailed();
- AssertFatalRC(rc);
+ if (!HMIsEnabled(pVM))
+ {
+ pVM->tm.s.pvGIPRC = MMHyperR3ToRC(pVM, pVM->tm.s.pvGIPR3);
+ pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pVM->tm.s.paTimerQueuesR3);
+ pVM->tm.s.VirtualGetRawDataRC.pu64Prev = MMHyperR3ToRC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
+ AssertFatal(pVM->tm.s.VirtualGetRawDataRC.pu64Prev);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
+ AssertFatalRC(rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
+ AssertFatalRC(rc);
+
+ if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else
+ AssertFatalFailed();
+ AssertFatalRC(rc);
+ }
/*
* Iterate the timers updating the pVMRC pointers.
@@ -1656,6 +1671,7 @@ VMMR3DECL(int) TMR3TimerDestroy(PTMTIMER pTimer)
STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
Assert(pQueue->offSchedule);
tmTimerQueueSchedule(pVM, pQueue);
+ STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
}
/*
@@ -1875,17 +1891,17 @@ static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint6
AssertCompile(TMCLOCK_MAX == 4);
#ifdef DEBUG_Sander /* very annoying, keep it private. */
- if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+ if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
Log(("tmR3TimerCallback: timer event still pending!!\n"));
#endif
- if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+ if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
&& ( pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offSchedule
|| tmR3AnyExpiredTimers(pVM)
)
- && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+ && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
&& !pVM->tm.s.fRunningQueues
)
{
@@ -2617,8 +2633,20 @@ VMMR3DECL(int) TMR3TimerSetCritSect(PTMTIMERR3 pTimer, PPDMCRITSECT pCritSect)
*/
VMMR3_INT_DECL(PRTTIMESPEC) TMR3UtcNow(PVM pVM, PRTTIMESPEC pTime)
{
+ /* Get a stable set of VirtualSync parameters before querying UTC. */
+ uint64_t offVirtualSync;
+ uint64_t offVirtualSyncGivenUp;
+ do
+ {
+ offVirtualSync = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+ offVirtualSyncGivenUp = ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp);
+ } while (ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) != offVirtualSync);
+
+ Assert(offVirtualSync >= offVirtualSyncGivenUp);
+ uint64_t const offLag = offVirtualSync - offVirtualSyncGivenUp;
+
RTTimeNow(pTime);
- RTTimeSpecSubNano(pTime, ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) - ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp));
+ RTTimeSpecSubNano(pTime, offLag);
RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
return pTime;
}
@@ -2718,9 +2746,9 @@ VMMR3DECL(int) TMR3NotifyResume(PVM pVM, PVMCPU pVCpu)
* @param pVM Pointer to the VM.
* @param u32Percent The new percentage. 100 means normal operation.
*/
-VMMDECL(int) TMR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
+VMMDECL(int) TMR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
{
- return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)tmR3SetWarpDrive, 2, pVM, u32Percent);
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)tmR3SetWarpDrive, 2, pUVM, u32Percent);
}
@@ -2728,12 +2756,14 @@ VMMDECL(int) TMR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
* EMT worker for TMR3SetWarpDrive.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param u32Percent See TMR3SetWarpDrive().
* @internal
*/
-static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
PVMCPU pVCpu = VMMGetCpu(pVM);
/*
@@ -2768,6 +2798,21 @@ static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
/**
+ * Gets the current warp drive percent.
+ *
+ * @returns The warp drive percent.
+ * @param pVM Pointer to the VM.
+ */
+VMMR3DECL(uint32_t) TMR3GetWarpDrive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+ return pVM->tm.s.u32VirtualWarpDrivePercentage;
+}
+
+
+/**
* Gets the performance information for one virtual CPU as seen by the VMM.
*
* The returned times covers the period where the VM is running and will be
diff --git a/src/VBox/VMM/VMMR3/TRPM.cpp b/src/VBox/VMM/VMMR3/TRPM.cpp
index c87ea4c2..656f215e 100644
--- a/src/VBox/VMM/VMMR3/TRPM.cpp
+++ b/src/VBox/VMM/VMMR3/TRPM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -92,7 +92,7 @@
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/err.h>
#include <VBox/param.h>
@@ -423,11 +423,12 @@ static VBOXIDTE_GENERIC g_aIdt[256] =
};
+#ifdef VBOX_WITH_RAW_MODE
/** Enable or disable tracking of Guest's IDT. */
-#define TRPM_TRACK_GUEST_IDT_CHANGES
-
+# define TRPM_TRACK_GUEST_IDT_CHANGES
/** Enable or disable tracking of Shadow IDT. */
-#define TRPM_TRACK_SHADOW_IDT_CHANGES
+# define TRPM_TRACK_SHADOW_IDT_CHANGES
+#endif
/** TRPM saved state version. */
#define TRPM_SAVED_STATE_VERSION 9
@@ -439,7 +440,9 @@ static VBOXIDTE_GENERIC g_aIdt[256] =
*******************************************************************************/
static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
+#ifdef TRPM_TRACK_GUEST_IDT_CHANGES
static DECLCALLBACK(int) trpmR3GuestIDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
+#endif
/**
@@ -472,12 +475,11 @@ VMMR3DECL(int) TRPMR3Init(PVM pVM)
pVCpu->trpm.s.offVM = RT_OFFSETOF(VM, aCpus[i].trpm);
pVCpu->trpm.s.offVMCpu = RT_OFFSETOF(VMCPU, trpm);
- pVCpu->trpm.s.uActiveVector = ~0;
+ pVCpu->trpm.s.uActiveVector = ~0U;
}
pVM->trpm.s.GuestIdtr.pIdt = RTRCPTR_MAX;
- pVM->trpm.s.pvMonShwIdtRC = RTRCPTR_MAX;
- pVM->trpm.s.fDisableMonitoring = false;
+ pVM->trpm.s.pvMonShwIdtRC = RTRCPTR_MAX;
pVM->trpm.s.fSafeToDropGuestIDTMonitoring = false;
/*
@@ -516,65 +518,83 @@ VMMR3DECL(int) TRPMR3Init(PVM pVM)
/*
* Statistics.
*/
- STAM_REG(pVM, &pVM->trpm.s.StatRCWriteGuestIDTFault, STAMTYPE_COUNTER, "/TRPM/RC/IDTWritesFault", STAMUNIT_OCCURENCES, "Guest IDT writes the we returned to R3 to handle.");
- STAM_REG(pVM, &pVM->trpm.s.StatRCWriteGuestIDTHandled, STAMTYPE_COUNTER, "/TRPM/RC/IDTWritesHandled", STAMUNIT_OCCURENCES, "Guest IDT writes that we handled successfully.");
- STAM_REG(pVM, &pVM->trpm.s.StatSyncIDT, STAMTYPE_PROFILE, "/PROF/TRPM/SyncIDT", STAMUNIT_TICKS_PER_CALL, "Profiling of TRPMR3SyncIDT().");
-
- /* traps */
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x00], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/00", STAMUNIT_TICKS_PER_CALL, "#DE - Divide error.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x01], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/01", STAMUNIT_TICKS_PER_CALL, "#DB - Debug (single step and more).");
- //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x02], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/02", STAMUNIT_TICKS_PER_CALL, "NMI");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x03], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/03", STAMUNIT_TICKS_PER_CALL, "#BP - Breakpoint.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x04], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/04", STAMUNIT_TICKS_PER_CALL, "#OF - Overflow.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x05], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/05", STAMUNIT_TICKS_PER_CALL, "#BR - Bound range exceeded.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x06], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/06", STAMUNIT_TICKS_PER_CALL, "#UD - Undefined opcode.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x07], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/07", STAMUNIT_TICKS_PER_CALL, "#NM - Device not available (FPU).");
- //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x08], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/08", STAMUNIT_TICKS_PER_CALL, "#DF - Double fault.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x09], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/09", STAMUNIT_TICKS_PER_CALL, "#?? - Coprocessor segment overrun (obsolete).");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0a], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0a", STAMUNIT_TICKS_PER_CALL, "#TS - Task switch fault.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0b], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0b", STAMUNIT_TICKS_PER_CALL, "#NP - Segment not present.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0c], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0c", STAMUNIT_TICKS_PER_CALL, "#SS - Stack segment fault.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0d], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0d", STAMUNIT_TICKS_PER_CALL, "#GP - General protection fault.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0e], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0e", STAMUNIT_TICKS_PER_CALL, "#PF - Page fault.");
- //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0f], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0f", STAMUNIT_TICKS_PER_CALL, "Reserved.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x10], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/10", STAMUNIT_TICKS_PER_CALL, "#MF - Math fault..");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x11], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/11", STAMUNIT_TICKS_PER_CALL, "#AC - Alignment check.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x12], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/12", STAMUNIT_TICKS_PER_CALL, "#MC - Machine check.");
- STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x13], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/13", STAMUNIT_TICKS_PER_CALL, "#XF - SIMD Floating-Point Exception.");
-
-#ifdef VBOX_WITH_STATISTICS
+#ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
+ {
+ STAM_REG(pVM, &pVM->trpm.s.StatRCWriteGuestIDTFault, STAMTYPE_COUNTER, "/TRPM/RC/IDTWritesFault", STAMUNIT_OCCURENCES, "Guest IDT writes the we returned to R3 to handle.");
+ STAM_REG(pVM, &pVM->trpm.s.StatRCWriteGuestIDTHandled, STAMTYPE_COUNTER, "/TRPM/RC/IDTWritesHandled", STAMUNIT_OCCURENCES, "Guest IDT writes that we handled successfully.");
+ STAM_REG(pVM, &pVM->trpm.s.StatSyncIDT, STAMTYPE_PROFILE, "/PROF/TRPM/SyncIDT", STAMUNIT_TICKS_PER_CALL, "Profiling of TRPMR3SyncIDT().");
+
+ /* traps */
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x00], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/00", STAMUNIT_TICKS_PER_CALL, "#DE - Divide error.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x01], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/01", STAMUNIT_TICKS_PER_CALL, "#DB - Debug (single step and more).");
+ //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x02], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/02", STAMUNIT_TICKS_PER_CALL, "NMI");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x03], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/03", STAMUNIT_TICKS_PER_CALL, "#BP - Breakpoint.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x04], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/04", STAMUNIT_TICKS_PER_CALL, "#OF - Overflow.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x05], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/05", STAMUNIT_TICKS_PER_CALL, "#BR - Bound range exceeded.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x06], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/06", STAMUNIT_TICKS_PER_CALL, "#UD - Undefined opcode.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x07], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/07", STAMUNIT_TICKS_PER_CALL, "#NM - Device not available (FPU).");
+ //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x08], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/08", STAMUNIT_TICKS_PER_CALL, "#DF - Double fault.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x09], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/09", STAMUNIT_TICKS_PER_CALL, "#?? - Coprocessor segment overrun (obsolete).");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0a], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0a", STAMUNIT_TICKS_PER_CALL, "#TS - Task switch fault.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0b], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0b", STAMUNIT_TICKS_PER_CALL, "#NP - Segment not present.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0c], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0c", STAMUNIT_TICKS_PER_CALL, "#SS - Stack segment fault.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0d], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0d", STAMUNIT_TICKS_PER_CALL, "#GP - General protection fault.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0e], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0e", STAMUNIT_TICKS_PER_CALL, "#PF - Page fault.");
+ //STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x0f], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/0f", STAMUNIT_TICKS_PER_CALL, "Reserved.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x10], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/10", STAMUNIT_TICKS_PER_CALL, "#MF - Math fault..");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x11], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/11", STAMUNIT_TICKS_PER_CALL, "#AC - Alignment check.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x12], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/12", STAMUNIT_TICKS_PER_CALL, "#MC - Machine check.");
+ STAM_REG(pVM, &pVM->trpm.s.aStatGCTraps[0x13], STAMTYPE_PROFILE_ADV, "/TRPM/GC/Traps/13", STAMUNIT_TICKS_PER_CALL, "#XF - SIMD Floating-Point Exception.");
+ }
+#endif
+
+# ifdef VBOX_WITH_STATISTICS
rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, sizeof(STAMCOUNTER), MM_TAG_TRPM, (void **)&pVM->trpm.s.paStatForwardedIRQR3);
AssertRCReturn(rc, rc);
pVM->trpm.s.paStatForwardedIRQRC = MMHyperR3ToRC(pVM, pVM->trpm.s.paStatForwardedIRQR3);
- pVM->trpm.s.paStatForwardedIRQR0 = MMHyperR3ToR0(pVM, pVM->trpm.s.paStatForwardedIRQR3);
for (unsigned i = 0; i < 256; i++)
STAMR3RegisterF(pVM, &pVM->trpm.s.paStatForwardedIRQR3[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
i < 0x20 ? "/TRPM/ForwardRaw/TRAP/%02X" : "/TRPM/ForwardRaw/IRQ/%02X", i);
- rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, sizeof(STAMCOUNTER), MM_TAG_TRPM, (void **)&pVM->trpm.s.paStatHostIrqR3);
- AssertRCReturn(rc, rc);
- pVM->trpm.s.paStatHostIrqRC = MMHyperR3ToRC(pVM, pVM->trpm.s.paStatHostIrqR3);
- pVM->trpm.s.paStatHostIrqR0 = MMHyperR3ToR0(pVM, pVM->trpm.s.paStatHostIrqR3);
- for (unsigned i = 0; i < 256; i++)
- STAMR3RegisterF(pVM, &pVM->trpm.s.paStatHostIrqR3[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
- "Host interrupts.", "/TRPM/HostIRQs/%02x", i);
-#endif
-
- STAM_REG(pVM, &pVM->trpm.s.StatForwardProfR3, STAMTYPE_PROFILE_ADV, "/TRPM/ForwardRaw/ProfR3", STAMUNIT_TICKS_PER_CALL, "Profiling TRPMForwardTrap.");
- STAM_REG(pVM, &pVM->trpm.s.StatForwardProfRZ, STAMTYPE_PROFILE_ADV, "/TRPM/ForwardRaw/ProfRZ", STAMUNIT_TICKS_PER_CALL, "Profiling TRPMForwardTrap.");
- STAM_REG(pVM, &pVM->trpm.s.StatForwardFailNoHandler, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailNoHandler", STAMUNIT_OCCURENCES,"Failure to forward interrupt in raw mode.");
- STAM_REG(pVM, &pVM->trpm.s.StatForwardFailPatchAddr, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailPatchAddr", STAMUNIT_OCCURENCES,"Failure to forward interrupt in raw mode.");
- STAM_REG(pVM, &pVM->trpm.s.StatForwardFailR3, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailR3", STAMUNIT_OCCURENCES, "Failure to forward interrupt in raw mode.");
- STAM_REG(pVM, &pVM->trpm.s.StatForwardFailRZ, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailRZ", STAMUNIT_OCCURENCES, "Failure to forward interrupt in raw mode.");
+# ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
+ {
+ rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, sizeof(STAMCOUNTER), MM_TAG_TRPM, (void **)&pVM->trpm.s.paStatHostIrqR3);
+ AssertRCReturn(rc, rc);
+ pVM->trpm.s.paStatHostIrqRC = MMHyperR3ToRC(pVM, pVM->trpm.s.paStatHostIrqR3);
+ for (unsigned i = 0; i < 256; i++)
+ STAMR3RegisterF(pVM, &pVM->trpm.s.paStatHostIrqR3[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
+ "Host interrupts.", "/TRPM/HostIRQs/%02x", i);
+ }
+# endif
+# endif
- STAM_REG(pVM, &pVM->trpm.s.StatTrap0dDisasm, STAMTYPE_PROFILE, "/TRPM/RC/Traps/0d/Disasm", STAMUNIT_TICKS_PER_CALL, "Profiling disassembly part of trpmGCTrap0dHandler.");
- STAM_REG(pVM, &pVM->trpm.s.StatTrap0dRdTsc, STAMTYPE_COUNTER, "/TRPM/RC/Traps/0d/RdTsc", STAMUNIT_OCCURENCES, "Number of RDTSC #GPs.");
+#ifdef VBOX_WITH_RAW_MODE
+ if (!HMIsEnabled(pVM))
+ {
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardProfR3, STAMTYPE_PROFILE_ADV, "/TRPM/ForwardRaw/ProfR3", STAMUNIT_TICKS_PER_CALL, "Profiling TRPMForwardTrap.");
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardProfRZ, STAMTYPE_PROFILE_ADV, "/TRPM/ForwardRaw/ProfRZ", STAMUNIT_TICKS_PER_CALL, "Profiling TRPMForwardTrap.");
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardFailNoHandler, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailNoHandler", STAMUNIT_OCCURENCES,"Failure to forward interrupt in raw mode.");
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardFailPatchAddr, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailPatchAddr", STAMUNIT_OCCURENCES,"Failure to forward interrupt in raw mode.");
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardFailR3, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailR3", STAMUNIT_OCCURENCES, "Failure to forward interrupt in raw mode.");
+ STAM_REG(pVM, &pVM->trpm.s.StatForwardFailRZ, STAMTYPE_COUNTER, "/TRPM/ForwardRaw/FailRZ", STAMUNIT_OCCURENCES, "Failure to forward interrupt in raw mode.");
+
+ STAM_REG(pVM, &pVM->trpm.s.StatTrap0dDisasm, STAMTYPE_PROFILE, "/TRPM/RC/Traps/0d/Disasm", STAMUNIT_TICKS_PER_CALL, "Profiling disassembly part of trpmGCTrap0dHandler.");
+ STAM_REG(pVM, &pVM->trpm.s.StatTrap0dRdTsc, STAMTYPE_COUNTER, "/TRPM/RC/Traps/0d/RdTsc", STAMUNIT_OCCURENCES, "Number of RDTSC #GPs.");
+ }
+#endif
+#ifdef VBOX_WITH_RAW_MODE
/*
* Default action when entering raw mode for the first time
*/
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+ if (!HMIsEnabled(pVM))
+ {
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+ }
+#endif
return 0;
}
@@ -590,10 +610,14 @@ VMMR3DECL(int) TRPMR3Init(PVM pVM)
*/
VMMR3DECL(void) TRPMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
+#ifdef VBOX_WITH_RAW_MODE
+ if (HMIsEnabled(pVM))
+ return;
+
/* Only applies to raw mode which supports only 1 VCPU. */
PVMCPU pVCpu = &pVM->aCpus[0];
-
LogFlow(("TRPMR3Relocate\n"));
+
/*
* Get the trap handler addresses.
*
@@ -664,21 +688,17 @@ VMMR3DECL(void) TRPMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
*/
CPUMSetHyperIDTR(pVCpu, VM_RC_ADDR(pVM, &pVM->trpm.s.aIdt[0]), sizeof(pVM->trpm.s.aIdt)-1);
- if ( !pVM->trpm.s.fDisableMonitoring
- && !VMMIsHwVirtExtForced(pVM))
+# ifdef TRPM_TRACK_SHADOW_IDT_CHANGES
+ if (pVM->trpm.s.pvMonShwIdtRC != RTRCPTR_MAX)
{
-#ifdef TRPM_TRACK_SHADOW_IDT_CHANGES
- if (pVM->trpm.s.pvMonShwIdtRC != RTRCPTR_MAX)
- {
- rc = PGMHandlerVirtualDeregister(pVM, pVM->trpm.s.pvMonShwIdtRC);
- AssertRC(rc);
- }
- pVM->trpm.s.pvMonShwIdtRC = VM_RC_ADDR(pVM, &pVM->trpm.s.aIdt[0]);
- rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->trpm.s.pvMonShwIdtRC, pVM->trpm.s.pvMonShwIdtRC + sizeof(pVM->trpm.s.aIdt) - 1,
- 0, 0, "trpmRCShadowIDTWriteHandler", 0, "Shadow IDT write access handler");
+ rc = PGMHandlerVirtualDeregister(pVM, pVM->trpm.s.pvMonShwIdtRC);
AssertRC(rc);
-#endif
}
+ pVM->trpm.s.pvMonShwIdtRC = VM_RC_ADDR(pVM, &pVM->trpm.s.aIdt[0]);
+ rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->trpm.s.pvMonShwIdtRC, pVM->trpm.s.pvMonShwIdtRC + sizeof(pVM->trpm.s.aIdt) - 1,
+ 0, 0, "trpmRCShadowIDTWriteHandler", 0, "Shadow IDT write access handler");
+ AssertRC(rc);
+# endif
/* Relocate IDT handlers for forwarding guest traps/interrupts. */
for (uint32_t iTrap = 0; iTrap < RT_ELEMENTS(pVM->trpm.s.aGuestTrapHandler); iTrap++)
@@ -702,12 +722,11 @@ VMMR3DECL(void) TRPMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
}
}
-#ifdef VBOX_WITH_STATISTICS
+# ifdef VBOX_WITH_STATISTICS
pVM->trpm.s.paStatForwardedIRQRC += offDelta;
- pVM->trpm.s.paStatForwardedIRQR0 = MMHyperR3ToR0(pVM, pVM->trpm.s.paStatForwardedIRQR3);
pVM->trpm.s.paStatHostIrqRC += offDelta;
- pVM->trpm.s.paStatHostIrqR0 = MMHyperR3ToR0(pVM, pVM->trpm.s.paStatHostIrqR3);
-#endif
+# endif
+#endif /* VBOX_WITH_RAW_MODE */
}
@@ -720,7 +739,7 @@ VMMR3DECL(void) TRPMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
VMMR3DECL(int) TRPMR3Term(PVM pVM)
{
NOREF(pVM);
- return 0;
+ return VINF_SUCCESS;
}
@@ -733,7 +752,7 @@ VMMR3DECL(int) TRPMR3Term(PVM pVM)
*/
VMMR3DECL(void) TRPMR3ResetCpu(PVMCPU pVCpu)
{
- pVCpu->trpm.s.uActiveVector = ~0;
+ pVCpu->trpm.s.uActiveVector = ~0U;
}
@@ -772,14 +791,20 @@ VMMR3DECL(void) TRPMR3Reset(PVM pVM)
memset(pVM->trpm.s.aGuestTrapHandler, 0, sizeof(pVM->trpm.s.aGuestTrapHandler));
TRPMR3Relocate(pVM, 0);
+#ifdef VBOX_WITH_RAW_MODE
/*
* Default action when entering raw mode for the first time
*/
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+ if (!HMIsEnabled(pVM))
+ {
+ PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+ }
+#endif
}
+# ifdef VBOX_WITH_RAW_MODE
/**
* Resolve a builtin RC symbol.
*
@@ -812,6 +837,7 @@ VMMR3_INT_DECL(int) TRPMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR p
return VERR_SYMBOL_NOT_FOUND;
return VINF_SUCCESS;
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -842,9 +868,9 @@ static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM)
SSMR3PutGCUIntPtr(pSSM, pTrpmCpu->uSavedCR2);
SSMR3PutGCUInt(pSSM, pTrpmCpu->uPrevVector);
}
- SSMR3PutBool(pSSM, pTrpm->fDisableMonitoring);
+ SSMR3PutBool(pSSM, HMIsEnabled(pVM));
PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
- SSMR3PutUInt(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT));
+ SSMR3PutUInt(pSSM, VM_WHEN_RAW_MODE(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT), 0));
SSMR3PutMem(pSSM, &pTrpm->au32IdtPatched[0], sizeof(pTrpm->au32IdtPatched));
SSMR3PutU32(pSSM, ~0); /* separator. */
@@ -915,7 +941,8 @@ static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
SSMR3GetGCUInt(pSSM, &pTrpmCpu->uPrevVector);
}
- SSMR3GetBool(pSSM, &pVM->trpm.s.fDisableMonitoring);
+ bool fIgnored;
+ SSMR3GetBool(pSSM, &fIgnored);
}
else
{
@@ -930,9 +957,8 @@ static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
SSMR3GetGCUIntPtr(pSSM, &pTrpmCpu->uSavedCR2);
SSMR3GetGCUInt(pSSM, &pTrpmCpu->uPrevVector);
- RTGCUINT fDisableMonitoring;
- SSMR3GetGCUInt(pSSM, &fDisableMonitoring);
- pTrpm->fDisableMonitoring = !!fDisableMonitoring;
+ RTGCUINT fIgnored;
+ SSMR3GetGCUInt(pSSM, &fIgnored);
}
RTUINT fSyncIDT;
@@ -944,12 +970,14 @@ static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
AssertMsgFailed(("fSyncIDT=%#x\n", fSyncIDT));
return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
}
+#ifdef VBOX_WITH_RAW_MODE
if (fSyncIDT)
{
PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
}
/* else: cleared by reset call above. */
+#endif
SSMR3GetMem(pSSM, &pTrpm->au32IdtPatched[0], sizeof(pTrpm->au32IdtPatched));
@@ -997,6 +1025,7 @@ static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
return VINF_SUCCESS;
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Check if gate handlers were updated
@@ -1012,11 +1041,7 @@ VMMR3DECL(int) TRPMR3SyncIDT(PVM pVM, PVMCPU pVCpu)
const bool fRawRing0 = EMIsRawRing0Enabled(pVM);
int rc;
- if (pVM->trpm.s.fDisableMonitoring)
- {
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
- return VINF_SUCCESS; /* Nothing to do */
- }
+ AssertReturn(!HMIsEnabled(pVM), VERR_TRPM_HM_IPE);
if (fRawRing0 && CSAMIsEnabled(pVM))
{
@@ -1041,7 +1066,7 @@ VMMR3DECL(int) TRPMR3SyncIDT(PVM pVM, PVMCPU pVCpu)
return DBGFSTOP(pVM);
}
-#ifdef TRPM_TRACK_GUEST_IDT_CHANGES
+# ifdef TRPM_TRACK_GUEST_IDT_CHANGES
/*
* Check if Guest's IDTR has changed.
*/
@@ -1080,7 +1105,7 @@ VMMR3DECL(int) TRPMR3SyncIDT(PVM pVM, PVMCPU pVCpu)
/* Update saved Guest IDTR. */
pVM->trpm.s.GuestIdtr = IDTR;
}
-#endif
+# endif
/*
* Sync the interrupt gate.
@@ -1108,45 +1133,7 @@ VMMR3DECL(int) TRPMR3SyncIDT(PVM pVM, PVMCPU pVCpu)
}
-/**
- * Disable IDT monitoring and syncing
- *
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(void) TRPMR3DisableMonitoring(PVM pVM)
-{
- /*
- * Deregister any virtual handlers.
- */
-#ifdef TRPM_TRACK_GUEST_IDT_CHANGES
- if (pVM->trpm.s.GuestIdtr.pIdt != RTRCPTR_MAX)
- {
- if (!pVM->trpm.s.fSafeToDropGuestIDTMonitoring)
- {
- int rc = PGMHandlerVirtualDeregister(pVM, pVM->trpm.s.GuestIdtr.pIdt);
- AssertRC(rc);
- }
- pVM->trpm.s.GuestIdtr.pIdt = RTRCPTR_MAX;
- }
- pVM->trpm.s.GuestIdtr.cbIdt = 0;
-#endif
-
-#ifdef TRPM_TRACK_SHADOW_IDT_CHANGES
- if (pVM->trpm.s.pvMonShwIdtRC != RTRCPTR_MAX)
- {
- int rc = PGMHandlerVirtualDeregister(pVM, pVM->trpm.s.pvMonShwIdtRC);
- AssertRC(rc);
- pVM->trpm.s.pvMonShwIdtRC = RTRCPTR_MAX;
- }
-#endif
-
- PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies on VCPU */
- VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
-
- pVM->trpm.s.fDisableMonitoring = true;
-}
-
-
+# ifdef TRPM_TRACK_GUEST_IDT_CHANGES
/**
* \#PF Handler callback for virtual access handler ranges.
*
@@ -1169,10 +1156,12 @@ static DECLCALLBACK(int) trpmR3GuestIDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void
Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
Log(("trpmR3GuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
NOREF(pvPtr); NOREF(pvUser); NOREF(pvBuf);
+ Assert(!HMIsEnabled(pVM));
VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TRPM_SYNC_IDT);
return VINF_PGM_HANDLER_DO_DEFAULT;
}
+# endif /* TRPM_TRACK_GUEST_IDT_CHANGES */
/**
@@ -1182,10 +1171,11 @@ static DECLCALLBACK(int) trpmR3GuestIDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void
* @param pVM Pointer to the VM.
* @param iTrap Trap/interrupt gate number.
*/
-VMMR3DECL(int) trpmR3ClearPassThroughHandler(PVM pVM, unsigned iTrap)
+int trpmR3ClearPassThroughHandler(PVM pVM, unsigned iTrap)
{
/* Only applies to raw mode which supports only 1 VCPU. */
PVMCPU pVCpu = &pVM->aCpus[0];
+ Assert(!HMIsEnabled(pVM));
/** @todo cleanup trpmR3ClearPassThroughHandler()! */
RTRCPTR aGCPtrs[TRPM_HANDLER_MAX];
@@ -1246,6 +1236,8 @@ VMMR3DECL(int) trpmR3ClearPassThroughHandler(PVM pVM, unsigned iTrap)
*/
VMMR3DECL(uint32_t) TRPMR3QueryGateByHandler(PVM pVM, RTRCPTR GCPtr)
{
+ AssertReturn(!HMIsEnabled(pVM), ~0U);
+
for (uint32_t iTrap = 0; iTrap < RT_ELEMENTS(pVM->trpm.s.aGuestTrapHandler); iTrap++)
{
if (pVM->trpm.s.aGuestTrapHandler[iTrap] == GCPtr)
@@ -1275,6 +1267,7 @@ VMMR3DECL(uint32_t) TRPMR3QueryGateByHandler(PVM pVM, RTRCPTR GCPtr)
VMMR3DECL(RTRCPTR) TRPMR3GetGuestTrapHandler(PVM pVM, unsigned iTrap)
{
AssertReturn(iTrap < RT_ELEMENTS(pVM->trpm.s.aIdt), TRPM_INVALID_HANDLER);
+ AssertReturn(!HMIsEnabled(pVM), TRPM_INVALID_HANDLER);
return pVM->trpm.s.aGuestTrapHandler[iTrap];
}
@@ -1293,6 +1286,7 @@ VMMR3DECL(int) TRPMR3SetGuestTrapHandler(PVM pVM, unsigned iTrap, RTRCPTR pHandl
{
/* Only valid in raw mode which implies 1 VCPU */
Assert(PATMIsEnabled(pVM) && pVM->cCpus == 1);
+ AssertReturn(!HMIsEnabled(pVM), VERR_TRPM_HM_IPE);
PVMCPU pVCpu = &pVM->aCpus[0];
/*
@@ -1329,7 +1323,8 @@ VMMR3DECL(int) TRPMR3SetGuestTrapHandler(PVM pVM, unsigned iTrap, RTRCPTR pHandl
return rc;
}
- if (EMIsRawRing0Enabled(pVM))
+ if ( EMIsRawRing0Enabled(pVM)
+ && !EMIsRawRing1Enabled(pVM)) /* can't deal with the ambiguity of ring 1 & 2 in the patch code. */
{
/*
* Only replace handlers for which we are 100% certain there won't be
@@ -1482,6 +1477,7 @@ VMMR3DECL(bool) TRPMR3IsGateHandler(PVM pVM, RTRCPTR GCPtr)
return false;
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
* Inject event (such as external irq or trap)
@@ -1493,12 +1489,11 @@ VMMR3DECL(bool) TRPMR3IsGateHandler(PVM pVM, RTRCPTR GCPtr)
*/
VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent)
{
- PCPUMCTX pCtx;
- int rc;
-
- pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+#ifdef VBOX_WITH_RAW_MODE
Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
- Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+#endif
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
/* Currently only useful for external hardware interrupts. */
Assert(enmEvent == TRPM_HARDWARE_INT);
@@ -1512,23 +1507,23 @@ VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent)
#ifdef TRPM_FORWARD_TRAPS_IN_GC
# ifdef LOG_ENABLED
- DBGFR3InfoLog(pVM, "cpumguest", "TRPMInject");
- DBGFR3DisasInstrCurrentLog(pVCpu, "TRPMInject");
+ DBGFR3_INFO_LOG(pVM, "cpumguest", "TRPMInject");
+ DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "TRPMInject");
# endif
uint8_t u8Interrupt;
- rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+ int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
Log(("TRPMR3InjectEvent: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
if (RT_SUCCESS(rc))
{
# ifndef IEM_VERIFICATION_MODE
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
# endif
{
rc = TRPMAssertTrap(pVCpu, u8Interrupt, enmEvent);
AssertRC(rc);
STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
- return HWACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM;
+ return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM : VINF_EM_RESCHEDULE_REM;
}
/* If the guest gate is not patched, then we will check (again) if we can patch it. */
if (pVM->trpm.s.aGuestTrapHandler[u8Interrupt] == TRPM_INVALID_HANDLER)
@@ -1547,7 +1542,7 @@ VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent)
rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8Interrupt, 0, TRPM_TRAP_NO_ERRORCODE, enmEvent, -1);
if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
{
- Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
+ Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
return VINF_EM_RESCHEDULE_RAW;
@@ -1556,32 +1551,30 @@ VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent)
}
else
STAM_COUNTER_INC(&pVM->trpm.s.StatForwardFailNoHandler);
-#ifdef VBOX_WITH_REM
+# ifdef VBOX_WITH_REM
REMR3NotifyPendingInterrupt(pVM, pVCpu, u8Interrupt);
-#endif
+# endif
}
else
{
AssertRC(rc);
- return HWACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
+ return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
}
-#else
- if (HWACCMR3IsActive(pVCpu))
+#else /* !TRPM_FORWARD_TRAPS_IN_GC */
+ if (HMR3IsActive(pVCpu))
{
uint8_t u8Interrupt;
- rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+ int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
Log(("TRPMR3InjectEvent: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
if (RT_SUCCESS(rc))
{
rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
AssertRC(rc);
STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
- return VINF_EM_RESCHEDULE_HWACC;
+ return VINF_EM_RESCHEDULE_HM;
}
}
- else
- AssertRC(rc);
-#endif
+#endif /* !TRPM_FORWARD_TRAPS_IN_GC */
}
/** @todo check if it's safe to translate the patch address to the original guest address.
* this implies a safe state in translated instructions and should take sti successors into account (instruction fusing)
diff --git a/src/VBox/VMM/VMMR3/VM.cpp b/src/VBox/VMM/VMMR3/VM.cpp
index 0b280f65..36effa6a 100644
--- a/src/VBox/VMM/VMMR3/VM.cpp
+++ b/src/VBox/VMM/VMMR3/VM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -65,7 +65,7 @@
#include <VBox/vmm/iom.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/ftm.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include "VMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
@@ -90,46 +90,23 @@
/*******************************************************************************
-* Structures and Typedefs *
-*******************************************************************************/
-/**
- * VM destruction callback registration record.
- */
-typedef struct VMATDTOR
-{
- /** Pointer to the next record in the list. */
- struct VMATDTOR *pNext;
- /** Pointer to the callback function. */
- PFNVMATDTOR pfnAtDtor;
- /** The user argument. */
- void *pvUser;
-} VMATDTOR;
-/** Pointer to a VM destruction callback registration record. */
-typedef VMATDTOR *PVMATDTOR;
-
-
-/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Pointer to the list of VMs. */
static PUVM g_pUVMsHead = NULL;
-/** Pointer to the list of at VM destruction callbacks. */
-static PVMATDTOR g_pVMAtDtorHead = NULL;
-/** Lock the g_pVMAtDtorHead list. */
-#define VM_ATDTOR_LOCK() do { } while (0)
-/** Unlock the g_pVMAtDtorHead list. */
-#define VM_ATDTOR_UNLOCK() do { } while (0)
-
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
+static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
static int vmR3InitRing3(PVM pVM, PUVM pUVM);
static int vmR3InitRing0(PVM pVM);
-static int vmR3InitGC(PVM pVM);
+#ifdef VBOX_WITH_RAW_MODE
+static int vmR3InitRC(PVM pVM);
+#endif
static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
#ifdef LOG_ENABLED
static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
@@ -193,15 +170,21 @@ VMMR3DECL(int) VMR3GlobalInit(void)
* @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
* This is called in the context of an EMT0.
* @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
- * @param ppVM Where to store the 'handle' of the created VM.
+ * @param ppVM Where to optionally store the 'handle' of the
+ * created VM.
+ * @param ppUVM Where to optionally store the user 'handle' of
+ * the created VM, this includes one reference as
+ * if VMR3RetainUVM() was called. The caller
+ * *MUST* remember to pass the returned value to
+ * VMR3ReleaseUVM() once done with the handle.
*/
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
PFNVMATERROR pfnVMAtError, void *pvUserVM,
PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
- PVM *ppVM)
+ PVM *ppVM, PUVM *ppUVM)
{
- LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
- cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
+ LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
+ cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));
if (pVmm2UserMethods)
{
@@ -213,11 +196,14 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
}
AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
- AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
+ AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
+ AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);
/*
* Because of the current hackiness of the applications
@@ -247,7 +233,7 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
if (RT_FAILURE(rc))
return rc;
if (pfnVMAtError)
- rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
+ rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
if (RT_SUCCESS(rc))
{
/*
@@ -276,8 +262,14 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
/*
* Success!
*/
- *ppVM = pUVM->pVM;
- LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
+ if (ppVM)
+ *ppVM = pUVM->pVM;
+ if (ppUVM)
+ {
+ VMR3RetainUVM(pUVM);
+ *ppUVM = pUVM;
+ }
+ LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
return VINF_SUCCESS;
}
}
@@ -301,7 +293,7 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
break;
#ifndef RT_OS_DARWIN
- case VERR_HWACCM_CONFIG_MISMATCH:
+ case VERR_HM_CONFIG_MISMATCH:
pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
"This hardware extension is required by the VM configuration");
break;
@@ -354,7 +346,7 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
"pack' which must be downloaded and installed separately");
break;
- case VERR_PCI_PASSTHROUGH_NO_HWACCM:
+ case VERR_PCI_PASSTHROUGH_NO_HM:
pszError = N_("PCI passthrough requires VT-x/AMD-V");
break;
@@ -363,7 +355,7 @@ VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
break;
default:
- if (VMR3GetErrorCountU(pUVM) == 0)
+ if (VMR3GetErrorCount(pUVM) == 0)
pszError = RTErrGetFull(rc);
else
pszError = NULL; /* already set. */
@@ -525,13 +517,13 @@ static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUV
/*
* Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
*/
- rc = STAMR3InitUVM(pUVM);
+ rc = PDMR3InitUVM(pUVM);
if (RT_SUCCESS(rc))
{
- rc = MMR3InitUVM(pUVM);
+ rc = STAMR3InitUVM(pUVM);
if (RT_SUCCESS(rc))
{
- rc = PDMR3InitUVM(pUVM);
+ rc = MMR3InitUVM(pUVM);
if (RT_SUCCESS(rc))
{
/*
@@ -539,8 +531,8 @@ static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUV
*/
for (i = 0; i < cCpus; i++)
{
- rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
- RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
+ rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
+ _1M, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
cCpus > 1 ? "EMT-%u" : "EMT", i);
if (RT_FAILURE(rc))
break;
@@ -559,11 +551,11 @@ static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUV
{
/** @todo rainy day: terminate the EMTs. */
}
- PDMR3TermUVM(pUVM);
+ MMR3TermUVM(pUVM);
}
- MMR3TermUVM(pUVM);
+ STAMR3TermUVM(pUVM);
}
- STAMR3TermUVM(pUVM);
+ PDMR3TermUVM(pUVM);
}
RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
}
@@ -652,65 +644,7 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
if (RT_SUCCESS(rc))
{
- PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
- rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
- if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
- pVM->fHWACCMEnabled = true;
-
- /*
- * If executing in fake suplib mode disable RR3 and RR0 in the config.
- */
- const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
- if (psz && !strcmp(psz, "fake"))
- {
- CFGMR3RemoveValue(pRoot, "RawR3Enabled");
- CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
- CFGMR3RemoveValue(pRoot, "RawR0Enabled");
- CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
- }
-
- /*
- * Make sure the CPU count in the config data matches.
- */
- if (RT_SUCCESS(rc))
- {
- uint32_t cCPUsCfg;
- rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
- AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
- if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
- {
- AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
- cCPUsCfg, cCpus));
- rc = VERR_INVALID_PARAMETER;
- }
- }
-
- /*
- * Get the CPU execution cap.
- */
- if (RT_SUCCESS(rc))
- {
- rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
- AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
- }
-
- /*
- * Get the VM name and UUID.
- */
- if (RT_SUCCESS(rc))
- {
- rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
- AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
- }
-
- if (RT_SUCCESS(rc))
- {
- rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
- if (rc == VERR_CFGM_VALUE_NOT_FOUND)
- rc = VINF_SUCCESS;
- AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
- }
-
+ rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
if (RT_SUCCESS(rc))
{
/*
@@ -741,17 +675,19 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
* with debugger support.
*/
void *pvUser = NULL;
- rc = DBGCTcpCreate(pVM, &pvUser);
+ rc = DBGCTcpCreate(pUVM, &pvUser);
if ( RT_SUCCESS(rc)
|| rc == VERR_NET_ADDRESS_IN_USE)
{
pUVM->vm.s.pvDBGC = pvUser;
#endif
/*
- * Init the Guest Context components.
+ * Init the Raw-Mode Context components.
*/
- rc = vmR3InitGC(pVM);
+#ifdef VBOX_WITH_RAW_MODE
+ rc = vmR3InitRC(pVM);
if (RT_SUCCESS(rc))
+#endif
{
/*
* Now we can safely set the VM halt method to default.
@@ -760,11 +696,9 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
if (RT_SUCCESS(rc))
{
/*
- * Set the state and link into the global list.
+ * Set the state and we're done.
*/
vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
- pUVM->pNext = g_pUVMsHead;
- g_pUVMsHead = pUVM;
#ifdef LOG_ENABLED
RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
@@ -773,7 +707,7 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
}
}
#ifdef VBOX_WITH_DEBUGGER
- DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
+ DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
pUVM->vm.s.pvDBGC = NULL;
}
#endif
@@ -794,7 +728,7 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
* Do automatic cleanups while the VM structure is still alive and all
* references to it are still working.
*/
- PDMR3CritSectTerm(pVM);
+ PDMR3CritSectBothTerm(pVM);
/*
* Drop all references to VM and the VMCPU structures, then
@@ -830,6 +764,92 @@ static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMCons
/**
+ * Reads the base configuation from CFGM.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM structure.
+ * @param pUVM The user mode VM structure.
+ * @param cCpus The CPU count given to VMR3Create.
+ */
+static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
+{
+ int rc;
+ PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
+
+ /*
+ * If executing in fake suplib mode disable RR3 and RR0 in the config.
+ */
+ const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
+ if (psz && !strcmp(psz, "fake"))
+ {
+ CFGMR3RemoveValue(pRoot, "RawR3Enabled");
+ CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
+ CFGMR3RemoveValue(pRoot, "RawR0Enabled");
+ CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
+ }
+
+ /*
+ * Base EM and HM config properties.
+ */
+ Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */
+#ifdef VBOX_WITH_RAW_MODE
+ bool fEnabled;
+ rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
+ pVM->fRecompileUser = !fEnabled;
+ rc = CFGMR3QueryBoolDef(pRoot, "RawR0Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
+ pVM->fRecompileSupervisor = !fEnabled;
+# ifdef VBOX_WITH_RAW_RING1
+ rc = CFGMR3QueryBoolDef(pRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
+# endif
+ rc = CFGMR3QueryBoolDef(pRoot, "PATMEnabled", &pVM->fPATMEnabled, true); AssertRCReturn(rc, rc);
+ rc = CFGMR3QueryBoolDef(pRoot, "CSAMEnabled", &pVM->fCSAMEnabled, true); AssertRCReturn(rc, rc);
+ rc = CFGMR3QueryBoolDef(pRoot, "HMEnabled", &pVM->fHMEnabled, true); AssertRCReturn(rc, rc);
+#else
+ pVM->fHMEnabled = true;
+#endif
+ Assert(!pVM->fHMEnabledFixed);
+ LogRel(("VM: fHMEnabled=%RTbool (configured) fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n"
+ "VM: fRawRing1Enabled=%RTbool CSAM=%RTbool PATM=%RTbool\n",
+ pVM->fHMEnabled, pVM->fRecompileUser, pVM->fRecompileSupervisor,
+ pVM->fRawRing1Enabled, pVM->fCSAMEnabled, pVM->fPATMEnabled));
+
+
+ /*
+ * Make sure the CPU count in the config data matches.
+ */
+ uint32_t cCPUsCfg;
+ rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
+ AssertLogRelMsgReturn(cCPUsCfg == cCpus,
+ ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
+ cCPUsCfg, cCpus),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Get the CPU execution cap.
+ */
+ rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
+
+ /*
+ * Get the VM name and UUID.
+ */
+ rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
+
+ rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
+ if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+ rc = VINF_SUCCESS;
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
+
+ rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
+ AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
* Register the calling EMT with GVM.
*
* @returns VBox status code.
@@ -864,55 +884,59 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
}
/*
- * Init all R3 components, the order here might be important.
+ * Register statistics.
*/
- rc = MMR3Init(pVM);
- if (RT_SUCCESS(rc))
+ STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
+ STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
+ STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
+
+ for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
{
- STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
- STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
- STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
- STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
-
- for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
- {
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
- AssertRC(rc);
- rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
- AssertRC(rc);
- }
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
+ AssertRC(rc);
+ rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
+ AssertRC(rc);
+ }
- STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
- STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
- STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
+ STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
- rc = CPUMR3Init(pVM);
+ /*
+ * Init all R3 components, the order here might be important.
+ * HM shall be initialized first!
+ */
+ rc = HMR3Init(pVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = MMR3Init(pVM);
if (RT_SUCCESS(rc))
{
- rc = HWACCMR3Init(pVM);
+ rc = CPUMR3Init(pVM);
if (RT_SUCCESS(rc))
{
rc = PGMR3Init(pVM);
@@ -940,12 +964,14 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
rc = TRPMR3Init(pVM);
if (RT_SUCCESS(rc))
{
+#ifdef VBOX_WITH_RAW_MODE
rc = CSAMR3Init(pVM);
if (RT_SUCCESS(rc))
{
rc = PATMR3Init(pVM);
if (RT_SUCCESS(rc))
{
+#endif
rc = IOMR3Init(pVM);
if (RT_SUCCESS(rc))
{
@@ -964,8 +990,10 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
rc = PGMR3InitDynMap(pVM);
if (RT_SUCCESS(rc))
rc = MMR3HyperInitFinalize(pVM);
+#ifdef VBOX_WITH_RAW_MODE
if (RT_SUCCESS(rc))
rc = PATMR3InitFinalize(pVM);
+#endif
if (RT_SUCCESS(rc))
rc = PGMR3InitFinalize(pVM);
if (RT_SUCCESS(rc))
@@ -977,6 +1005,11 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
rc = REMR3InitFinalize(pVM);
#endif
if (RT_SUCCESS(rc))
+ {
+ PGMR3MemSetup(pVM, false /*fAtReset*/);
+ PDMR3MemSetup(pVM, false /*fAtReset*/);
+ }
+ if (RT_SUCCESS(rc))
rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
if (RT_SUCCESS(rc))
{
@@ -999,12 +1032,14 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
int rc2 = IOMR3Term(pVM);
AssertRC(rc2);
}
+#ifdef VBOX_WITH_RAW_MODE
int rc2 = PATMR3Term(pVM);
AssertRC(rc2);
}
int rc2 = CSAMR3Term(pVM);
AssertRC(rc2);
}
+#endif
int rc2 = TRPMR3Term(pVM);
AssertRC(rc2);
}
@@ -1028,15 +1063,16 @@ static int vmR3InitRing3(PVM pVM, PUVM pUVM)
int rc2 = PGMR3Term(pVM);
AssertRC(rc2);
}
- int rc2 = HWACCMR3Term(pVM);
- AssertRC(rc2);
+ //int rc2 = CPUMR3Term(pVM);
+ //AssertRC(rc2);
}
- //int rc2 = CPUMR3Term(pVM);
- //AssertRC(rc2);
+ /* MMR3Term is not called here because it'll kill the heap. */
}
- /* MMR3Term is not called here because it'll kill the heap. */
+ int rc2 = HMR3Term(pVM);
+ AssertRC(rc2);
}
+
LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
return rc;
}
@@ -1070,23 +1106,24 @@ static int vmR3InitRing0(PVM pVM)
if (RT_SUCCESS(rc))
rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
if (RT_SUCCESS(rc))
- rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
+ rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
- /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
+ /** @todo Move this to the VMINITCOMPLETED_HM notification handler. */
if (RT_SUCCESS(rc))
- CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
+ CPUMR3SetHWVirtEx(pVM, HMIsEnabled(pVM));
LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
return rc;
}
+#ifdef VBOX_WITH_RAW_MODE
/**
- * Initializes all GC components of the VM
+ * Initializes all RC components of the VM
*/
-static int vmR3InitGC(PVM pVM)
+static int vmR3InitRC(PVM pVM)
{
- LogFlow(("vmR3InitGC:\n"));
+ LogFlow(("vmR3InitRC:\n"));
/*
* Check for FAKE suplib mode.
@@ -1101,16 +1138,17 @@ static int vmR3InitGC(PVM pVM)
rc = VMMR3InitRC(pVM);
}
else
- Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
+ Log(("vmR3InitRC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
/*
* Do notifications and return.
*/
if (RT_SUCCESS(rc))
- rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
- LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
+ rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RC);
+ LogFlow(("vmR3InitRC: returns %Rrc\n", rc));
return rc;
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -1124,9 +1162,18 @@ static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
int rc = VMMR3InitCompleted(pVM, enmWhat);
if (RT_SUCCESS(rc))
- rc = HWACCMR3InitCompleted(pVM, enmWhat);
+ rc = HMR3InitCompleted(pVM, enmWhat);
if (RT_SUCCESS(rc))
rc = PGMR3InitCompleted(pVM, enmWhat);
+#ifndef VBOX_WITH_RAW_MODE
+ if (enmWhat == VMINITCOMPLETED_RING3)
+ {
+ if (RT_SUCCESS(rc))
+ rc = SSMR3RegisterStub(pVM, "CSAM", 0);
+ if (RT_SUCCESS(rc))
+ rc = SSMR3RegisterStub(pVM, "PATM", 0);
+ }
+#endif
return rc;
}
@@ -1176,7 +1223,7 @@ static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBu
* @param pVM Pointer to the VM.
* @param offDelta Relocation delta relative to old location.
*/
-VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
+VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
@@ -1187,13 +1234,15 @@ VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
PDMR3LdrRelocateU(pVM->pUVM, offDelta);
PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
CPUMR3Relocate(pVM);
- HWACCMR3Relocate(pVM);
+ HMR3Relocate(pVM);
SELMR3Relocate(pVM);
VMMR3Relocate(pVM, offDelta);
SELMR3Relocate(pVM); /* !hack! fix stack! */
TRPMR3Relocate(pVM, offDelta);
+#ifdef VBOX_WITH_RAW_MODE
PATMR3Relocate(pVM);
CSAMR3Relocate(pVM, offDelta);
+#endif
IOMR3Relocate(pVM, offDelta);
EMR3Relocate(pVM);
TMR3Relocate(pVM, offDelta);
@@ -1258,15 +1307,17 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUse
*
* @returns VBox status code.
*
- * @param pVM The VM to power on.
+ * @param pUVM The VM to power on.
*
* @thread Any thread.
* @vmstate Created
* @vmstateto PoweringOn+Running
*/
-VMMR3DECL(int) VMR3PowerOn(PVM pVM)
+VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
{
- LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
+ LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
/*
@@ -1304,8 +1355,8 @@ static void vmR3SuspendDoWork(PVM pVM)
*/
static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
- LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
- Assert(!pvUser); NOREF(pvUser);
+ VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
+ LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
/*
* The first EMT switches the state to suspending. If this fails because
@@ -1319,6 +1370,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUse
VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
if (RT_FAILURE(rc))
return rc;
+ pVM->pUVM->vm.s.enmSuspendReason = enmReason;
}
VMSTATE enmVMState = VMR3GetState(pVM);
@@ -1352,29 +1404,45 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUse
* @returns VBox status code. When called on EMT, this will be a strict status
* code that has to be propagated up the call stack.
*
- * @param pVM The VM to suspend.
+ * @param pUVM The VM to suspend.
+ * @param enmReason The reason for suspending.
*
* @thread Any thread.
* @vmstate Running or RunningLS
* @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
*/
-VMMR3DECL(int) VMR3Suspend(PVM pVM)
+VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
{
- LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
/*
* Gather all the EMTs to make sure there are no races before
* changing the VM state.
*/
- int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
- vmR3Suspend, NULL);
+ int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
+ vmR3Suspend, (void *)(uintptr_t)enmReason);
LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
return rc;
}
/**
+ * Retrieves the reason for the most recent suspend.
+ *
+ * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
+ * or the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
+ return pUVM->vm.s.enmSuspendReason;
+}
+
+
+/**
* EMT rendezvous worker for VMR3Resume.
*
* @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
@@ -1382,12 +1450,12 @@ VMMR3DECL(int) VMR3Suspend(PVM pVM)
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU of the EMT.
- * @param pvUser Ignored.
+ * @param pvUser The resume reason (VMRESUMEREASON cast via uintptr_t).
*/
static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
- LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
- Assert(!pvUser); NOREF(pvUser);
+ VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
+ LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
/*
* The first thread thru here tries to change the state. We shouldn't be
@@ -1398,6 +1466,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser
int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
if (RT_FAILURE(rc))
return rc;
+ pVM->pUVM->vm.s.enmResumeReason = enmReason;
}
VMSTATE enmVMState = VMR3GetState(pVM);
@@ -1434,28 +1503,46 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser
* code that has to be propagated up the call stack.
*
* @param pVM The VM to resume.
+ * @param enmReason The reason we're resuming.
*
* @thread Any thread.
* @vmstate Suspended
* @vmstateto Running
*/
-VMMR3DECL(int) VMR3Resume(PVM pVM)
+VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
{
- LogFlow(("VMR3Resume: pVM=%p\n", pVM));
+ LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
/*
* Gather all the EMTs to make sure there are no races before
* changing the VM state.
*/
int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
- vmR3Resume, NULL);
+ vmR3Resume, (void *)(uintptr_t)enmReason);
LogFlow(("VMR3Resume: returns %Rrc\n", rc));
return rc;
}
/**
+ * Retrieves the reason for the most recent resume.
+ *
+ * @returns Resume reason. VMRESUMEREASON_INVALID if no resume has been
+ * done or the handle is invalid.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
+ return pUVM->vm.s.enmResumeReason;
+}
+
+
+/**
* EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
* after the live step has been completed.
*
@@ -1817,7 +1904,7 @@ static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
*
* @returns VBox status code.
*
- * @param pVM The VM which state should be saved.
+ * @param pUVM The VM which state should be saved.
* @param pszFilename The name of the save state file.
* @param pStreamOps The stream methods.
* @param pvStreamOpsUser The user argument to the stream methods.
@@ -1832,16 +1919,18 @@ static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
* @vmstateto Saving+Suspended or
* RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
*/
-VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
+VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
{
- LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
- pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
+ LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
+ pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
/*
* Validate input.
*/
AssertPtr(pfSuspended);
*pfSuspended = false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VM_ASSERT_OTHER_THREAD(pVM);
AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
@@ -1863,13 +1952,6 @@ VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwar
/**
* Save current VM state (used by FTM)
*
- * Can be used for both saving the state and creating snapshots.
- *
- * When called for a VM in the Running state, the saved state is created live
- * and the VM is only suspended when the final part of the saving is preformed.
- * The VM state will not be restored to Running in this case and it's up to the
- * caller to call VMR3Resume if this is desirable. (The rational is that the
- * caller probably wish to reconfigure the disks before resuming the VM.)
*
* @returns VBox status code.
*
@@ -1884,17 +1966,18 @@ VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwar
* @vmstateto Saving+Suspended or
* RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
*/
-VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
- bool fSkipStateChanges)
+VMMR3_INT_DECL(int) VMR3SaveFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended, bool fSkipStateChanges)
{
- LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
- pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
+ LogFlow(("VMR3SaveFT: pUVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
+ pUVM, pStreamOps, pvStreamOpsUser, pfSuspended));
/*
* Validate input.
*/
AssertPtr(pfSuspended);
*pfSuspended = false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
@@ -1915,7 +1998,7 @@ VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUse
*
* @returns VBox status code.
*
- * @param pVM The VM which state should be saved.
+ * @param pUVM The VM which state should be saved.
* @param cMsMaxDowntime The maximum downtime given as milliseconds.
* @param pStreamOps The stream methods.
* @param pvStreamOpsUser The user argument to the stream methods.
@@ -1928,17 +2011,19 @@ VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUse
* @vmstateto Saving+Suspended or
* RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
*/
-VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
{
- LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
- pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
+ LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
+ pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
/*
* Validate input.
*/
AssertPtr(pfSuspended);
*pfSuspended = false;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
VM_ASSERT_OTHER_THREAD(pVM);
AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
@@ -1962,7 +2047,7 @@ VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStre
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM Pointer to the VM.
* @param pszFilename The name of the file. NULL if pStreamOps is used.
* @param pStreamOps The stream methods. NULL if pszFilename is used.
* @param pvStreamOpsUser The user argument to the stream methods.
@@ -1973,19 +2058,21 @@ VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStre
*
* @thread EMT.
*/
-static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
bool fSkipStateChanges)
{
int rc = VINF_SUCCESS;
- LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
- pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
+ LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
+ pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
/*
* Validate input (paranoia).
*/
- AssertPtr(pVM);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertPtrNull(pszFilename);
AssertPtrNull(pStreamOps);
AssertPtrNull(pfnProgress);
@@ -1999,14 +2086,14 @@ static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS
* selectors and such are correct.
*/
rc = vmR3TrySetState(pVM, "VMR3Load", 2,
- VMSTATE_LOADING, VMSTATE_CREATED,
- VMSTATE_LOADING, VMSTATE_SUSPENDED);
+ VMSTATE_LOADING, VMSTATE_CREATED,
+ VMSTATE_LOADING, VMSTATE_SUSPENDED);
if (RT_FAILURE(rc))
return rc;
}
pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
- uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
+ uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
if (RT_SUCCESS(rc))
{
@@ -2020,7 +2107,7 @@ static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS
if (!fSkipStateChanges)
vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
- if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
+ if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
rc = VMSetError(pVM, rc, RT_SRC_POS,
N_("Unable to restore the virtual machine's saved state from '%s'. "
"It may be damaged or from an older version of VirtualBox. "
@@ -2049,24 +2136,24 @@ static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS
* @vmstate Created, Suspended
* @vmstateto Loading+Suspended
*/
-VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
+VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
{
- LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
- pVM, pszFilename, pszFilename, pfnProgress, pvUser));
+ LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
+ pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
/*
* Forward the request to EMT(0). No need to setup a rendezvous here
* since there is no execution taking place when this call is allowed.
*/
- int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
- pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
- false /*fTeleporting*/, false /* fSkipStateChanges */);
+ int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
+ pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
+ false /*fTeleporting*/, false /* fSkipStateChanges */);
LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
return rc;
}
@@ -2077,7 +2164,7 @@ VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM Pointer to the VM.
* @param pStreamOps The stream methods.
* @param pvStreamOpsUser The user argument to the stream methods.
* @param pfnProgress Progress callback. Optional.
@@ -2087,36 +2174,36 @@ VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS
* @vmstate Created, Suspended
* @vmstateto Loading+Suspended
*/
-VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
+VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
PFNVMPROGRESS pfnProgress, void *pvProgressUser)
{
- LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
- pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
+ LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
+ pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
/*
* Forward the request to EMT(0). No need to setup a rendezvous here
* since there is no execution taking place when this call is allowed.
*/
- int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
- pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
- true /*fTeleporting*/, false /* fSkipStateChanges */);
+ int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
+ pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
+ true /*fTeleporting*/, false /* fSkipStateChanges */);
LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
return rc;
}
/**
- * VMR3LoadFromFileFT for arbitrary file streams.
+ * Special version for the FT component; it skips state changes.
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
* @param pStreamOps The stream methods.
* @param pvStreamOpsUser The user argument to the stream methods.
* @param pfnProgress Progress callback. Optional.
@@ -2126,24 +2213,23 @@ VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStre
* @vmstate Created, Suspended
* @vmstateto Loading+Suspended
*/
-VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
+VMMR3_INT_DECL(int) VMR3LoadFromStreamFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
{
- LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
- pVM, pStreamOps, pvStreamOpsUser));
+ LogFlow(("VMR3LoadFromStreamFT: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p\n", pUVM, pStreamOps, pvStreamOpsUser));
/*
* Validate input.
*/
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
/*
* Forward the request to EMT(0). No need to setup a rendezvous here
* since there is no execution taking place when this call is allowed.
*/
- int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
- pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
- true /*fTeleporting*/, true /* fSkipStateChanges */);
+ int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
+ pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
+ true /*fTeleporting*/, true /* fSkipStateChanges */);
LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
return rc;
}
@@ -2183,7 +2269,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUs
if (RT_FAILURE(rc))
return rc;
if (rc >= 7)
- SSMR3Cancel(pVM);
+ SSMR3Cancel(pVM->pUVM);
}
/*
@@ -2211,13 +2297,13 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUs
/** @todo make the state dumping at VMR3PowerOff optional. */
bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
RTLogRelPrintf("****************** Guest state at power off ******************\n");
- DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
+ DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
RTLogRelPrintf("***\n");
- DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
+ DBGFR3Info(pVM->pUVM, "mode", NULL, DBGFR3InfoLogRelHlp());
RTLogRelPrintf("***\n");
- DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
+ DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
RTLogRelPrintf("***\n");
- DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
+ DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
/** @todo dump guest call stack. */
#if 1 // "temporary" while debugging #1589
RTLogRelPrintf("***\n");
@@ -2267,6 +2353,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUs
* Off or OffLS.
*/
PDMR3PowerOff(pVM);
+ DBGFR3PowerOff(pVM);
PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
@@ -2287,15 +2374,17 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUs
* @returns VBox status code. When called on EMT, this will be a strict status
* code that has to be propagated up the call stack.
*
- * @param pVM The handle of the VM to be powered off.
+ * @param pUVM The handle of the VM to be powered off.
*
* @thread Any thread.
* @vmstate Suspended, Running, Guru Meditation, Load Failure
* @vmstateto Off or OffLS
*/
-VMMR3DECL(int) VMR3PowerOff(PVM pVM)
+VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
{
- LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
+ LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
/*
@@ -2324,54 +2413,34 @@ VMMR3DECL(int) VMR3PowerOff(PVM pVM)
* @vmstate Off, Created
* @vmstateto N/A
*/
-VMMR3DECL(int) VMR3Destroy(PVM pVM)
+VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
{
- LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
+ LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
/*
* Validate input.
*/
- if (!pVM)
+ if (!pUVM)
return VERR_INVALID_VM_HANDLE;
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
/*
- * Change VM state to destroying and unlink the VM.
+     * Change VM state to destroying and call vmR3Destroy on each of the EMTs
+ * ending with EMT(0) doing the bulk of the cleanup.
*/
int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
if (RT_FAILURE(rc))
return rc;
- /** @todo lock this when we start having multiple machines in a process... */
- PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
- if (g_pUVMsHead == pUVM)
- g_pUVMsHead = pUVM->pNext;
- else
- {
- PUVM pPrev = g_pUVMsHead;
- while (pPrev && pPrev->pNext != pUVM)
- pPrev = pPrev->pNext;
- AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
-
- pPrev->pNext = pUVM->pNext;
- }
- pUVM->pNext = NULL;
-
- /*
- * Notify registered at destruction listeners.
- */
- vmR3AtDtor(pVM);
-
- /*
- * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
- * of the cleanup.
- */
- /* vmR3Destroy on all EMTs, ending with EMT(0). */
rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
AssertLogRelRC(rc);
- /* Wait for EMTs and destroy the UVM. */
+ /*
+ * Wait for EMTs to quit and destroy the UVM.
+ */
vmR3DestroyUVM(pUVM, 30000);
LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
@@ -2412,10 +2481,10 @@ DECLCALLBACK(int) vmR3Destroy(PVM pVM)
RTLogFlags(NULL, "nodisabled nobuffered");
#endif
#ifdef VBOX_WITH_STATISTICS
- STAMR3Dump(pVM, "*");
+ STAMR3Dump(pUVM, "*");
#else
LogRel(("************************* Statistics *************************\n"));
- STAMR3DumpToReleaseLog(pVM, "*");
+ STAMR3DumpToReleaseLog(pUVM, "*");
LogRel(("********************* End of statistics **********************\n"));
#endif
@@ -2425,26 +2494,28 @@ DECLCALLBACK(int) vmR3Destroy(PVM pVM)
int rc = TMR3Term(pVM);
AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
- rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
+ rc = DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
pUVM->vm.s.pvDBGC = NULL;
#endif
AssertRC(rc);
rc = FTMR3Term(pVM);
AssertRC(rc);
- rc = DBGFR3Term(pVM);
- AssertRC(rc);
rc = PDMR3Term(pVM);
AssertRC(rc);
+ rc = DBGFR3Term(pVM);
+ AssertRC(rc);
rc = IEMR3Term(pVM);
AssertRC(rc);
rc = EMR3Term(pVM);
AssertRC(rc);
rc = IOMR3Term(pVM);
AssertRC(rc);
+#ifdef VBOX_WITH_RAW_MODE
rc = CSAMR3Term(pVM);
AssertRC(rc);
rc = PATMR3Term(pVM);
AssertRC(rc);
+#endif
rc = TRPMR3Term(pVM);
AssertRC(rc);
rc = SELMR3Term(pVM);
@@ -2453,7 +2524,7 @@ DECLCALLBACK(int) vmR3Destroy(PVM pVM)
rc = REMR3Term(pVM);
AssertRC(rc);
#endif
- rc = HWACCMR3Term(pVM);
+ rc = HMR3Term(pVM);
AssertRC(rc);
rc = PGMR3Term(pVM);
AssertRC(rc);
@@ -2462,7 +2533,7 @@ DECLCALLBACK(int) vmR3Destroy(PVM pVM)
rc = CPUMR3Term(pVM);
AssertRC(rc);
SSMR3Term(pVM);
- rc = PDMR3CritSectTerm(pVM);
+ rc = PDMR3CritSectBothTerm(pVM);
AssertRC(rc);
rc = MMR3Term(pVM);
AssertRC(rc);
@@ -2639,130 +2710,6 @@ static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
/**
- * Enumerates the VMs in this process.
- *
- * @returns Pointer to the next VM.
- * @returns NULL when no more VMs.
- * @param pVMPrev The previous VM
- * Use NULL to start the enumeration.
- */
-VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
-{
- /*
- * This is quick and dirty. It has issues with VM being
- * destroyed during the enumeration.
- */
- PUVM pNext;
- if (pVMPrev)
- pNext = pVMPrev->pUVM->pNext;
- else
- pNext = g_pUVMsHead;
- return pNext ? pNext->pVM : NULL;
-}
-
-
-/**
- * Registers an at VM destruction callback.
- *
- * @returns VBox status code.
- * @param pfnAtDtor Pointer to callback.
- * @param pvUser User argument.
- */
-VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
-{
- /*
- * Check if already registered.
- */
- VM_ATDTOR_LOCK();
- PVMATDTOR pCur = g_pVMAtDtorHead;
- while (pCur)
- {
- if (pfnAtDtor == pCur->pfnAtDtor)
- {
- VM_ATDTOR_UNLOCK();
- AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
- return VERR_INVALID_PARAMETER;
- }
-
- /* next */
- pCur = pCur->pNext;
- }
- VM_ATDTOR_UNLOCK();
-
- /*
- * Allocate new entry.
- */
- PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
- if (!pVMAtDtor)
- return VERR_NO_MEMORY;
-
- VM_ATDTOR_LOCK();
- pVMAtDtor->pfnAtDtor = pfnAtDtor;
- pVMAtDtor->pvUser = pvUser;
- pVMAtDtor->pNext = g_pVMAtDtorHead;
- g_pVMAtDtorHead = pVMAtDtor;
- VM_ATDTOR_UNLOCK();
-
- return VINF_SUCCESS;
-}
-
-
-/**
- * Deregisters an at VM destruction callback.
- *
- * @returns VBox status code.
- * @param pfnAtDtor Pointer to callback.
- */
-VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
-{
- /*
- * Find it, unlink it and free it.
- */
- VM_ATDTOR_LOCK();
- PVMATDTOR pPrev = NULL;
- PVMATDTOR pCur = g_pVMAtDtorHead;
- while (pCur)
- {
- if (pfnAtDtor == pCur->pfnAtDtor)
- {
- if (pPrev)
- pPrev->pNext = pCur->pNext;
- else
- g_pVMAtDtorHead = pCur->pNext;
- pCur->pNext = NULL;
- VM_ATDTOR_UNLOCK();
-
- RTMemFree(pCur);
- return VINF_SUCCESS;
- }
-
- /* next */
- pPrev = pCur;
- pCur = pCur->pNext;
- }
- VM_ATDTOR_UNLOCK();
-
- return VERR_INVALID_PARAMETER;
-}
-
-
-/**
- * Walks the list of at VM destructor callbacks.
- * @param pVM The VM which is about to be destroyed.
- */
-static void vmR3AtDtor(PVM pVM)
-{
- /*
- * Find it, unlink it and free it.
- */
- VM_ATDTOR_LOCK();
- for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
- pCur->pfnAtDtor(pVM, pCur->pvUser);
- VM_ATDTOR_UNLOCK();
-}
-
-
-/**
* Worker which checks integrity of some internal structures.
* This is yet another attempt to track down that AVL tree crash.
*/
@@ -2836,19 +2783,12 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
*/
if (pVCpu->idCpu == 0)
{
+#ifdef VBOX_WITH_RAW_MODE
PATMR3Reset(pVM);
CSAMR3Reset(pVM);
- PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
- * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
-/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
- * communication structures residing in RAM when done in the other order. I.e. the device must be
- * quiesced first, then we clear the memory and plan tables. Probably have to make these things
- * explicit in some way, some memory setup pass or something.
- * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
- *
- * @bugref{4467}
- */
+#endif
PDMR3Reset(pVM);
+ PGMR3Reset(pVM);
SELMR3Reset(pVM);
TRPMR3Reset(pVM);
#ifdef VBOX_WITH_REM
@@ -2856,23 +2796,25 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
#endif
IOMR3Reset(pVM);
CPUMR3Reset(pVM);
- }
- CPUMR3ResetCpu(pVCpu);
- if (pVCpu->idCpu == 0)
- {
TMR3Reset(pVM);
EMR3Reset(pVM);
- HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
+ HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
#ifdef LOG_ENABLED
/*
* Debug logging.
*/
RTLogPrintf("\n\nThe VM was reset:\n");
- DBGFR3Info(pVM, "cpum", "verbose", NULL);
+ DBGFR3Info(pVM->pUVM, "cpum", "verbose", NULL);
#endif
/*
+ * Do memory setup.
+ */
+ PGMR3MemSetup(pVM, true /*fAtReset*/);
+ PDMR3MemSetup(pVM, true /*fAtReset*/);
+
+ /*
* Since EMT(0) is the last to go thru here, it will advance the state.
* When a live save is active, we will move on to SuspendingLS but
* leave it for VMR3Reset to do the actual suspending due to deadlock risks.
@@ -2914,13 +2856,23 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
* Reset the current VM.
*
* @returns VBox status code.
- * @param pVM VM to reset.
+ * @param pUVM The VM to reset.
*/
-VMMR3DECL(int) VMR3Reset(PVM pVM)
+VMMR3DECL(int) VMR3Reset(PUVM pUVM)
{
LogFlow(("VMR3Reset:\n"));
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ if (pVM->vm.s.fPowerOffInsteadOfReset)
+ {
+ if ( pUVM->pVmm2UserMethods
+ && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
+ pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
+ return VMR3PowerOff(pUVM);
+ }
+
/*
* Gather all the EMTs to make sure there are no races before
* changing the VM state.
@@ -3350,7 +3302,7 @@ static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmSt
for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
{
- pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
+ pCur->pfnAtState(pUVM, enmStateNew, enmStateOld, pCur->pvUser);
if ( enmStateNew != VMSTATE_DESTROYING
&& pVM->enmVMState == VMSTATE_DESTROYING)
break;
@@ -3522,7 +3474,7 @@ void vmR3SetGuruMeditation(PVM pVM)
else if (enmStateCur == VMSTATE_RUNNING_LS)
{
vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
- SSMR3Cancel(pVM);
+ SSMR3Cancel(pUVM);
}
RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
@@ -3550,7 +3502,7 @@ void vmR3SetTerminated(PVM pVM)
* @param pVM Pointer to the VM.
* @thread Any thread.
*/
-VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
+VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
{
VM_ASSERT_VALID_EXT_RETURN(pVM, false);
return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
@@ -3564,12 +3516,12 @@ VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
* state callback.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
* @param pfnAtState Pointer to callback.
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
+VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
{
LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
@@ -3577,12 +3529,11 @@ VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUse
* Validate input.
*/
AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
/*
* Allocate a new record.
*/
- PUVM pUVM = pVM->pUVM;
PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
if (!pNew)
return VERR_NO_MEMORY;
@@ -3606,12 +3557,12 @@ VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUse
* Deregisters a VM state change callback.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
* @param pfnAtState Pointer to callback.
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
+VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
{
LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
@@ -3619,9 +3570,8 @@ VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvU
* Validate input.
*/
AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
- PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
/*
@@ -3676,28 +3626,12 @@ VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvU
* Registers a VM error callback.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
- * @param pfnAtError Pointer to callback.
- * @param pvUser User argument.
- * @thread Any.
- */
-VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
-{
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
- return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
-}
-
-
-/**
- * Registers a VM error callback.
- *
- * @returns VBox status code.
- * @param pUVM Pointer to the VM.
+ * @param pUVM The VM handle.
* @param pfnAtError Pointer to callback.
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
+VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
{
LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
@@ -3733,12 +3667,12 @@ VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *
* Deregisters a VM error callback.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
* @param pfnAtError Pointer to callback.
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
+VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
{
LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
@@ -3746,9 +3680,8 @@ VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvU
* Validate input.
*/
AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
- PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
/*
@@ -3806,7 +3739,7 @@ static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_PO
{
va_list va;
va_start(va, pszFormat);
- pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
+ pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
va_end(va);
}
@@ -3818,7 +3751,7 @@ static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_PO
* @param pVM Pointer to the VM.
* @thread EMT.
*/
-VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
+VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
{
VM_ASSERT_EMT(pVM);
AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
@@ -3866,24 +3799,9 @@ VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
 * This can be used to avoid double error messages.
*
* @returns The error count.
- * @param pVM Pointer to the VM.
+ * @param pUVM The VM handle.
*/
-VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
-{
- AssertPtrReturn(pVM, 0);
- return VMR3GetErrorCountU(pVM->pUVM);
-}
-
-
-/**
- * Gets the number of errors raised via VMSetError.
- *
- * This can be used avoid double error messages.
- *
- * @returns The error count.
- * @param pVM Pointer to the VM.
- */
-VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
+VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
{
AssertPtrReturn(pUVM, 0);
AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
@@ -3960,7 +3878,7 @@ DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char
{
va_list va2;
va_copy(va2, *pArgs);
- pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
+ pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
va_end(va2);
fCalledSomeone = true;
}
@@ -3969,6 +3887,53 @@ DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char
/**
+ * Sets the error message.
+ *
+ * @returns rc. Meaning you can do:
+ * @code
+ * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
+ * @endcode
+ * @param pUVM The user mode VM handle.
+ * @param rc VBox status code.
+ * @param RT_SRC_POS_DECL Use RT_SRC_POS.
+ * @param pszFormat Error message format string.
+ * @param ... Error message arguments.
+ * @thread Any
+ */
+VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
+ va_end(va);
+ return rcRet;
+}
+
+
+/**
+ * Sets the error message.
+ *
+ * @returns rc. Meaning you can do:
+ * @code
+ * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
+ * @endcode
+ * @param pUVM The user mode VM handle.
+ * @param rc VBox status code.
+ * @param RT_SRC_POS_DECL Use RT_SRC_POS.
+ * @param pszFormat Error message format string.
+ * @param va Error message arguments.
+ * @thread Any
+ */
+VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
+ return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
+}
+
+
+
+/**
* Registers a VM runtime error callback.
*
* @returns VBox status code.
@@ -3977,7 +3942,7 @@ DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
+VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
{
LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
@@ -3985,12 +3950,11 @@ VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRu
* Validate input.
*/
AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
/*
* Allocate a new record.
*/
- PUVM pUVM = pVM->pUVM;
PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
if (!pNew)
return VERR_NO_MEMORY;
@@ -4014,12 +3978,12 @@ VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRu
* Deregisters a VM runtime error callback.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param pfnAtRuntimeError Pointer to callback.
* @param pvUser User argument.
* @thread Any.
*/
-VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
+VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
{
LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
@@ -4027,9 +3991,8 @@ VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAt
* Validate input.
*/
AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
- VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
- PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
/*
@@ -4107,7 +4070,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU
if (RT_FAILURE(rc))
return rc;
if (rc == 2)
- SSMR3Cancel(pVM);
+ SSMR3Cancel(pVM->pUVM);
VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
}
@@ -4133,6 +4096,7 @@ static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU
static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
+ PUVM pUVM = pVM->pUVM;
/*
* Take actions before the call.
@@ -4142,21 +4106,20 @@ static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszEr
rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
vmR3SetRuntimeErrorChangeState, NULL);
else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
- rc = VMR3Suspend(pVM);
+ rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
else
rc = VINF_SUCCESS;
/*
* Do the callback round.
*/
- PUVM pUVM = pVM->pUVM;
RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
{
va_list va;
va_copy(va, *pVa);
- pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
+ pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
va_end(va);
}
RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
@@ -4188,7 +4151,7 @@ static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszE
* @param pVM Pointer to the VM.
* @thread EMT.
*/
-VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
+VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
{
VM_ASSERT_EMT(pVM);
AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
@@ -4286,11 +4249,11 @@ DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *psz
 * This can be used to avoid double error messages.
*
* @returns The runtime error count.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
*/
-VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
+VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
{
- return pVM->pUVM->vm.s.cRuntimeErrors;
+ return pUVM->vm.s.cRuntimeErrors;
}
@@ -4301,7 +4264,7 @@ VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
*
* @param pVM Pointer to the VM.
*/
-VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
+VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
return pUVCpu
@@ -4350,28 +4313,10 @@ VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
* Returns the handle of the current EMT VMCPU thread.
*
* @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
- * @param pVM Pointer to the VM.
- * @thread EMT
- */
-VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
-{
- PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
-
- if (!pUVCpu)
- return NIL_RTTHREAD;
-
- return pUVCpu->vm.s.ThreadEMT;
-}
-
-
-/**
- * Returns the handle of the current EMT VMCPU thread.
- *
- * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @thread EMT
*/
-VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
+VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
{
PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
@@ -4383,20 +4328,22 @@ VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
/**
- * Return the package and core id of a CPU.
+ * Return the package and core ID of a CPU.
*
* @returns VBOX status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu Virtual CPU to get the ID from.
* @param pidCpuCore Where to store the core ID of the virtual CPU.
* @param pidCpuPackage Where to store the package ID of the virtual CPU.
*
*/
-VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
+VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
{
/*
* Validate input.
*/
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
@@ -4438,12 +4385,12 @@ static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
* even without this.
*/
Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
- PGMR3ResetUnpluggedCpu(pVM, pVCpu);
+ PGMR3ResetCpu(pVM, pVCpu);
PDMR3ResetCpu(pVCpu);
TRPMR3ResetCpu(pVCpu);
- CPUMR3ResetCpu(pVCpu);
+ CPUMR3ResetCpu(pVM, pVCpu);
EMR3ResetCpu(pVCpu);
- HWACCMR3ResetCpu(pVCpu);
+ HMR3ResetCpu(pVCpu);
return VINF_EM_WAIT_SIPI;
}
@@ -4452,11 +4399,13 @@ static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
* Hot-unplugs a CPU from the guest.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu Virtual CPU to perform the hot unplugging operation on.
*/
-VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
+VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
@@ -4464,7 +4413,7 @@ VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
* broadcast requests. Just note down somewhere that the CPU is
 * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
* it out of the EM loops when offline. */
- return VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
+ return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
}
@@ -4472,11 +4421,13 @@ VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
* Hot-plugs a CPU on the guest.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idCpu Virtual CPU to perform the hot plugging operation on.
*/
-VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
+VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
@@ -4493,8 +4444,10 @@ VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
 * @param uCpuExecutionCap New CPU execution cap in percent, 1-100. Where
* 100 is max performance (default).
*/
-VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
+VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
@@ -4504,3 +4457,23 @@ VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
return VINF_SUCCESS;
}
+
+/**
+ * Control whether the VM should power off when resetting.
+ *
+ * @returns VBox status code.
+ * @param pUVM The user mode VM handle.
+ * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
+ * resetting.
+ */
+VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+ /* Note: not called from EMT. */
+ pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMEmt.cpp b/src/VBox/VMM/VMMR3/VMEmt.cpp
index 481dbcf1..f52b3c78 100644
--- a/src/VBox/VMM/VMMR3/VMEmt.cpp
+++ b/src/VBox/VMM/VMMR3/VMEmt.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -160,7 +160,7 @@ int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu
break;
}
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
@@ -181,7 +181,7 @@ int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu
rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
}
- else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
+ else if (VM_FF_IS_SET(pVM, VM_FF_DBGF))
{
/*
* Service the debugger request.
@@ -189,12 +189,12 @@ int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu
rc = DBGFR3VMMForcedAction(pVM);
Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
}
- else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
+ else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
{
/*
* Service a delayed reset request.
*/
- rc = VMR3Reset(pVM);
+ rc = VMR3Reset(pVM->pUVM);
VM_FF_CLEAR(pVM, VM_FF_RESET);
Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
}
@@ -337,13 +337,13 @@ static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask,
TMR3TimerQueuesDo(pVM);
uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
uint64_t u64NanoTS;
TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
/*
@@ -536,8 +536,8 @@ static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMas
TMR3TimerQueuesDo(pVM);
uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
/*
@@ -545,8 +545,8 @@ static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMas
*/
uint64_t u64NanoTS;
TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
/*
@@ -685,8 +685,8 @@ static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMas
TMR3TimerQueuesDo(pVM);
uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
/*
@@ -695,8 +695,8 @@ static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMas
//u64NowLog = RTTimeNanoTS();
uint64_t u64Delta;
uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
/*
@@ -705,8 +705,8 @@ static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMas
if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
{
VMMR3YieldStop(pVM);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
break;
//RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
@@ -773,8 +773,8 @@ static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
/*
* Check Relevant FFs.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
break;
/*
@@ -857,8 +857,8 @@ static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
break;
if ( pUVCpu->pVM
- && ( VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
- || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+ && ( VM_FF_IS_PENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_PENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
)
)
break;
@@ -919,8 +919,8 @@ static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
/*
* Check Relevant FFs.
*/
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
break;
/*
@@ -1001,8 +1001,9 @@ static const struct VMHALTMETHODDESC
*
* @param pUVM Pointer to the user mode VM structure.
* @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ * @internal
*/
-VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
+VMMR3_INT_DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
{
LogFlow(("VMR3NotifyGlobalFFU:\n"));
uint32_t iHaldMethod = pUVM->vm.s.iHaltMethod;
@@ -1023,8 +1024,9 @@ VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
*
* @param pUVM Pointer to the user mode VM structure.
* @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
+ * @internal
*/
-VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
+VMMR3_INT_DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
{
PUVM pUVM = pUVCpu->pUVM;
@@ -1043,8 +1045,9 @@ VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
* @param pVCpu Pointer to the VMCPU.
* @param fIgnoreInterrupts If set the VM_FF_INTERRUPT flags is ignored.
* @thread The emulation thread.
+ * @internal
*/
-VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
+VMMR3_INT_DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
@@ -1054,8 +1057,8 @@ VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
const uint32_t fMask = !fIgnoreInterrupts
? VMCPU_FF_EXTERNAL_HALTED_MASK
: VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
- if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, fMask))
+ if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, fMask))
{
LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
return VINF_SUCCESS;
@@ -1095,7 +1098,7 @@ VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
/*
* Do the halt.
*/
- Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
+ VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
PUVM pUVM = pUVCpu->pUVM;
int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
@@ -1122,8 +1125,9 @@ VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
* case an appropriate status code is returned.
* @param pUVCpu Pointer to the user mode VMCPU structure.
* @thread The emulation thread.
+ * @internal
*/
-VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
+VMMR3_INT_DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
{
LogFlow(("VMR3WaitU:\n"));
@@ -1134,8 +1138,8 @@ VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
PVMCPU pVCpu = pUVCpu->pVCpu;
if ( pVM
- && ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
- || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+ && ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+ || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
)
)
{
diff --git a/src/VBox/VMM/VMMR3/VMM.cpp b/src/VBox/VMM/VMMR3/VMM.cpp
index 70249cd3..c0a14008 100644
--- a/src/VBox/VMM/VMMR3/VMM.cpp
+++ b/src/VBox/VMM/VMMR3/VMM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -80,6 +80,7 @@
#include <VBox/vmm/cfgm.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/mm.h>
@@ -95,16 +96,17 @@
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/ftm.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include "VMMSwitcher.h"
#include <VBox/vmm/vm.h>
-#include <VBox/vmm/ftm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/version.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
@@ -209,9 +211,6 @@ VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
AssertRCReturn(rc, rc);
- /* GC switchers are enabled by default. Turned off by HWACCM. */
- pVM->vmm.s.fSwitcherDisabled = false;
-
/*
* Register the saved state data unit.
*/
@@ -313,7 +312,7 @@ static int vmmR3InitStacks(PVM pVM)
#endif
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
/* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
- if (!VMMIsHwVirtExtForced(pVM))
+ if (!HMIsEnabled(pVM))
pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
else
#endif
@@ -348,11 +347,14 @@ static int vmmR3InitLoggers(PVM pVM)
PRTLOGGER pLogger = RTLogDefaultInstance();
if (pLogger)
{
- pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
- rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
- if (RT_FAILURE(rc))
- return rc;
- pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
+ if (!HMIsEnabled(pVM))
+ {
+ pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
+ rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
+ }
# ifdef VBOX_WITH_R0_LOGGING
size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
@@ -376,14 +378,17 @@ static int vmmR3InitLoggers(PVM pVM)
/*
* Allocate RC release logger instances (finalized in the relocator).
*/
- PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
- if (pRelLogger)
+ if (!HMIsEnabled(pVM))
{
- pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
- rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
- if (RT_FAILURE(rc))
- return rc;
- pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
+ PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
+ if (pRelLogger)
+ {
+ pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
+ rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
+ if (RT_FAILURE(rc))
+ return rc;
+ pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
+ }
}
#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
return VINF_SUCCESS;
@@ -447,7 +452,7 @@ static void vmmR3InitRegisterStats(PVM pVM)
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
- STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HWACCM_PATCH_TPR_INSTR returns.");
+ STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
@@ -505,7 +510,8 @@ VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
//rc = VERR_GENERAL_FAILURE;
rc = VINF_SUCCESS;
#else
- rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
+ rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT,
+ RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
#endif
/*
* Flush the logs.
@@ -529,10 +535,18 @@ VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
if (RT_SUCCESS(rc))
rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
}
+
+ /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
+ if (pVM->aCpus[0].vmm.s.hR0ThreadCtx != NIL_RTTHREADCTX)
+ LogRel(("VMM: Thread-context hooks enabled!\n"));
+ else
+ LogRel(("VMM: Thread-context hooks unavailable.\n"));
+
return rc;
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Initializes the RC VMM.
*
@@ -545,7 +559,7 @@ VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
Assert(pVCpu && pVCpu->idCpu == 0);
/* In VMX mode, there's no need to init RC. */
- if (pVM->vmm.s.fSwitcherDisabled)
+ if (HMIsEnabled(pVM))
return VINF_SUCCESS;
AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
@@ -562,12 +576,13 @@ VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
{
CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
uint64_t u64TS = RTTimeProgramStartNanoTS();
- CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
- CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
+ CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 4: The program startup TS - Hi. */
+ CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 4: The program startup TS - Lo. */
+ CPUMPushHyper(pVCpu, vmmGetBuildType()); /* Param 3: Version argument. */
CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
CPUMPushHyper(pVCpu, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
- CPUMPushHyper(pVCpu, 5 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
+ CPUMPushHyper(pVCpu, 6 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
@@ -608,6 +623,7 @@ VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
}
return rc;
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -626,6 +642,12 @@ VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
case VMINITCOMPLETED_RING3:
{
/*
+ * CPUM's post-initialization (APIC base MSR caching).
+ */
+ rc = CPUMR3InitCompleted(pVM);
+ AssertRCReturn(rc, rc);
+
+ /*
* Set page attributes to r/w for stack pages.
*/
for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
@@ -673,16 +695,21 @@ VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
break;
}
- case VMINITCOMPLETED_RING0:
+ case VMINITCOMPLETED_HM:
{
/*
* Disable the periodic preemption timers if we can use the
* VMX-preemption timer instead.
*/
if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
- && HWACCMR3IsVmxPreemptionTimerUsed(pVM))
+ && HMR3IsVmxPreemptionTimerUsed(pVM))
pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
+
+ /*
+ * CPUM's post-initialization (print CPUIDs).
+ */
+ CPUMR3LogCpuIds(pVM);
break;
}
@@ -816,11 +843,14 @@ VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
/*
* Get other RC entry points.
*/
- int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
- AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
+ if (!HMIsEnabled(pVM))
+ {
+ int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
+ AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
- rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
- AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
+ rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
+ AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
+ }
/*
* Update the logger.
@@ -843,18 +873,20 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
int rc = VINF_SUCCESS;
RTRCPTR RCPtrLoggerFlush = 0;
- if (pVM->vmm.s.pRCLoggerR3
+ if ( pVM->vmm.s.pRCLoggerR3
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
|| pVM->vmm.s.pRCRelLoggerR3
#endif
)
{
+ Assert(!HMIsEnabled(pVM));
rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
}
if (pVM->vmm.s.pRCLoggerR3)
{
+ Assert(!HMIsEnabled(pVM));
RTRCPTR RCPtrLoggerWrapper = 0;
rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
@@ -868,6 +900,7 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
if (pVM->vmm.s.pRCRelLoggerR3)
{
+ Assert(!HMIsEnabled(pVM));
RTRCPTR RCPtrLoggerWrapper = 0;
rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
@@ -901,7 +934,8 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
- rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
+ rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
+ pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
pfnLoggerWrapper, pfnLoggerFlush,
RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
@@ -909,7 +943,9 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
RTR0PTR pfnLoggerPrefix = NIL_RTR0PTR;
rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerPrefix", &pfnLoggerPrefix);
AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerPrefix not found! rc=%Rra\n", rc), rc);
- rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pfnLoggerPrefix, NIL_RTR0PTR);
+ rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger,
+ pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
+ pfnLoggerPrefix, NIL_RTR0PTR);
AssertReleaseMsgRCReturn(rc, ("RTLogSetCustomPrefixCallback failed! rc=%Rra\n", rc), rc);
pR0LoggerR3->idCpu = i;
@@ -918,8 +954,8 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
}
- rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pDefault,
- RTLOGFLAGS_BUFFERED, UINT32_MAX);
+ rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
+ pDefault, RTLOGFLAGS_BUFFERED, UINT32_MAX);
AssertRC(rc);
}
}
@@ -936,7 +972,7 @@ VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
*/
VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
{
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
return pVM->vmm.s.szRing0AssertMsg1;
RTRCPTR RCPtr;
@@ -949,6 +985,23 @@ VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
/**
+ * Returns the VMCPU of the specified virtual CPU.
+ *
+ * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idCpu The ID of the virtual CPU.
+ */
+VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
+ AssertReturn(idCpu < pUVM->cCpus, NULL);
+ VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
+ return &pUVM->pVM->aCpus[idCpu];
+}
+
+
+/**
* Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
*
* @returns Pointer to the buffer.
@@ -956,7 +1009,7 @@ VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
*/
VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
{
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
return pVM->vmm.s.szRing0AssertMsg2;
RTRCPTR RCPtr;
@@ -1062,6 +1115,7 @@ static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion,
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Resolve a builtin RC symbol.
*
@@ -1084,18 +1138,19 @@ VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pR
}
else if (!strcmp(pszSymbol, "g_RelLogger"))
{
-#ifdef VBOX_WITH_RC_RELEASE_LOGGING
+# ifdef VBOX_WITH_RC_RELEASE_LOGGING
if (pVM->vmm.s.pRCRelLoggerR3)
pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
*pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
-#else
+# else
*pRCPtrValue = NIL_RTRCPTR;
-#endif
+# endif
}
else
return VERR_SYMBOL_NOT_FOUND;
return VINF_SUCCESS;
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -1191,6 +1246,7 @@ static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Executes guest code in the raw-mode context.
*
@@ -1224,6 +1280,9 @@ VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
PGMMapCheck(pVM);
+# ifdef VBOX_WITH_SAFE_STR
+ SELMR3CheckShadowTR(pVM);
+# endif
#endif
int rc;
do
@@ -1262,6 +1321,7 @@ VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
/* Resume GC */
}
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -1270,9 +1330,9 @@ VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
-VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
+VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
{
- Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+ Log2(("VMMR3HmRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
for (;;)
{
@@ -1282,14 +1342,14 @@ VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
#ifdef NO_SUPCALLR0VMM
rc = VERR_GENERAL_FAILURE;
#else
- rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
+ rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
if (RT_LIKELY(rc == VINF_SUCCESS))
rc = pVCpu->vmm.s.iLastGZRc;
#endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
#if 0 /* todo triggers too often */
- Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TO_R3));
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
#endif
#ifdef LOG_ENABLED
@@ -1303,7 +1363,7 @@ VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
#endif /* !LOG_ENABLED */
if (rc != VINF_VMM_CALL_HOST)
{
- Log2(("VMMR3HwAccRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+ Log2(("VMMR3HmRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
return rc;
}
rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
@@ -1339,7 +1399,7 @@ DECLCALLBACK(int) vmmR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
pCtx->rip = 0;
- Log(("vmmR3SendSipi for VCPU %d with vector %x\n", uVector));
+ Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
EMSetState(pVCpu, EMSTATE_HALTED);
@@ -1357,7 +1417,10 @@ DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
VMCPU_ASSERT_EMT(pVCpu);
Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
- CPUMR3ResetCpu(pVCpu);
+
+ PGMR3ResetCpu(pVM, pVCpu);
+ CPUMR3ResetCpu(pVM, pVCpu);
+
return VINF_EM_WAIT_SIPI;
}
@@ -1402,8 +1465,8 @@ VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
{
VM_ASSERT_EMT(pVM);
- if (HWACCMIsEnabled(pVM))
- return HWACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
+ if (HMIsEnabled(pVM))
+ return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
return VERR_NOT_SUPPORTED;
}
@@ -1418,8 +1481,8 @@ VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbP
*/
VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
{
- if (HWACCMIsEnabled(pVM))
- return HWACMMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
+ if (HMIsEnabled(pVM))
+ return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
return VINF_SUCCESS;
}
@@ -1668,6 +1731,7 @@ VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS
/*
* Validate input.
*/
+ AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
&& (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
&& !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
@@ -1708,7 +1772,7 @@ VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS
while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
{
- if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+ if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
{
rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
if ( rc != VINF_SUCCESS
@@ -1720,7 +1784,7 @@ VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS
ASMNopPause();
}
}
- Assert(!VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS));
+ Assert(!VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS));
Assert(!pVCpu->vmm.s.fInRendezvous);
pVCpu->vmm.s.fInRendezvous = true;
@@ -1838,6 +1902,7 @@ VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr,
return VINF_SUCCESS;
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Calls a RC function.
@@ -1883,7 +1948,9 @@ VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list
cArgs /* edx */
);
+#if 0
memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
+#endif
PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
int i = cArgs;
while (i-- > 0)
@@ -1937,6 +2004,7 @@ VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list
}
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
* Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
@@ -1987,6 +2055,7 @@ VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVM
}
+#ifdef VBOX_WITH_RAW_MODE
/**
* Resumes executing hypervisor code when interrupted by a queue flush or a
* debug event.
@@ -2009,29 +2078,29 @@ VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
do
{
-#ifdef NO_SUPCALLR0VMM
+# ifdef NO_SUPCALLR0VMM
rc = VERR_GENERAL_FAILURE;
-#else
+# else
rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
if (RT_LIKELY(rc == VINF_SUCCESS))
rc = pVCpu->vmm.s.iLastGZRc;
-#endif
+# endif
} while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
/*
* Flush the loggers.
*/
-#ifdef LOG_ENABLED
+# ifdef LOG_ENABLED
PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
if ( pLogger
&& pLogger->offScratch > 0)
RTLogFlushRC(NULL, pLogger);
-#endif
-#ifdef VBOX_WITH_RC_RELEASE_LOGGING
+# endif
+# ifdef VBOX_WITH_RC_RELEASE_LOGGING
PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
-#endif
+# endif
if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
VMMR3FatalDump(pVM, pVCpu, rc);
if (rc != VINF_VMM_CALL_HOST)
@@ -2044,6 +2113,7 @@ VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
return rc;
}
}
+#endif /* VBOX_WITH_RAW_MODE */
/**
@@ -2060,8 +2130,8 @@ static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
* We must also check for pending critsect exits or else we can deadlock
* when entering other critsects here.
*/
- if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
- PDMCritSectFF(pVCpu);
+ if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
+ PDMCritSectBothFF(pVCpu);
switch (pVCpu->vmm.s.enmCallRing3Operation)
{
@@ -2076,6 +2146,26 @@ static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
}
/*
+ * Enter a r/w critical section exclusively.
+ */
+ case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
+ {
+ pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
+ true /*fCallRing3*/);
+ break;
+ }
+
+ /*
+ * Enter a r/w critical section shared.
+ */
+ case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
+ {
+ pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
+ true /*fCallRing3*/);
+ break;
+ }
+
+ /*
* Acquire the PDM lock.
*/
case VMMCALLRING3_PDM_LOCK:
@@ -2311,14 +2401,16 @@ static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *p
PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
+ PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
+ PRINT_FLAG(VMCPU_FF_,TO_R3);
+#ifdef VBOX_WITH_RAW_MODE
PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
- PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
- PRINT_FLAG(VMCPU_FF_,TO_R3);
+#endif
if (f)
pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
else
@@ -2338,7 +2430,7 @@ static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *p
PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
- PRINT_GROUP(VMCPU_FF_,HWACCM_TO_R3,_MASK);
+ PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
if (c)
pHlp->pfnPrintf(pHlp, "\n");
diff --git a/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp b/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
index 528561bf..56dc1d5b 100644
--- a/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
+++ b/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -33,7 +33,7 @@
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/version.h>
-#include <VBox/vmm/hwaccm.h>
+#include <VBox/vmm/hm.h>
#include <iprt/assert.h>
#include <iprt/time.h>
#include <iprt/stream.h>
@@ -288,6 +288,7 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
case VINF_EM_RAW_IRET_TRAP:
case VINF_EM_DBG_HYPER_BREAKPOINT:
case VINF_EM_DBG_HYPER_STEPPED:
+ case VINF_EM_TRIPLE_FAULT:
case VERR_VMM_HYPER_CR3_MISMATCH:
{
/*
@@ -299,13 +300,14 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
uint8_t u8TrapNo = 0xce;
RTGCUINT uErrorCode = 0xdeadface;
RTGCUINTPTR uCR2 = 0xdeadface;
- int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
- if (!HWACCMIsEnabled(pVM))
+ uint8_t cbInstr = UINT8_MAX;
+ int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2, &cbInstr);
+ if (!HMIsEnabled(pVM))
{
if (RT_SUCCESS(rc2))
pHlp->pfnPrintf(pHlp,
- "!! TRAP=%02x ERRCD=%RGv CR2=%RGv EIP=%RX32 Type=%d\n",
- u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
+ "!! TRAP=%02x ERRCD=%RGv CR2=%RGv EIP=%RX32 Type=%d cbInstr=%02x\n",
+ u8TrapNo, uErrorCode, uCR2, uEIP, enmType, cbInstr);
else
pHlp->pfnPrintf(pHlp,
"!! EIP=%RX32 NOTRAP\n",
@@ -313,13 +315,13 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
}
else if (RT_SUCCESS(rc2))
pHlp->pfnPrintf(pHlp,
- "!! ACTIVE TRAP=%02x ERRCD=%RGv CR2=%RGv PC=%RGr Type=%d (Guest!)\n",
- u8TrapNo, uErrorCode, uCR2, CPUMGetGuestRIP(pVCpu), enmType);
+ "!! ACTIVE TRAP=%02x ERRCD=%RGv CR2=%RGv PC=%RGr Type=%d cbInstr=%02x (Guest!)\n",
+ u8TrapNo, uErrorCode, uCR2, CPUMGetGuestRIP(pVCpu), enmType, cbInstr);
/*
* Dump the relevant hypervisor registers and stack.
*/
- if (HWACCMIsEnabled(pVM))
+ if (HMIsEnabled(pVM))
{
if ( rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */
|| pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
@@ -414,7 +416,7 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
esp.FlatPtr = esp.off = pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp;
PCDBGFSTACKFRAME pFirstFrame;
- rc2 = DBGFR3StackWalkBeginEx(pVM, pVCpu->idCpu, DBGFCODETYPE_RING0, &ebp, &esp, &pc,
+ rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0, &ebp, &esp, &pc,
DBGFRETURNTYPE_INVALID, &pFirstFrame);
if (RT_SUCCESS(rc2))
{
@@ -526,7 +528,8 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
/* Disassemble the instruction. */
char szInstr[256];
- rc2 = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER | DBGF_DISAS_FLAGS_DEFAULT_MODE,
+ rc2 = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
+ DBGF_DISAS_FLAGS_CURRENT_HYPER | DBGF_DISAS_FLAGS_DEFAULT_MODE,
&szInstr[0], sizeof(szInstr), NULL);
if (RT_SUCCESS(rc2))
pHlp->pfnPrintf(pHlp,
@@ -537,12 +540,12 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
"!!\n"
"!!\n"
"!!\n");
- rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
+ rc2 = DBGFR3Info(pVM->pUVM, "cpumhyper", "verbose", pHlp);
fDoneHyper = true;
/* Callstack. */
PCDBGFSTACKFRAME pFirstFrame;
- rc2 = DBGFR3StackWalkBegin(pVM, pVCpu->idCpu, DBGFCODETYPE_HYPER, &pFirstFrame);
+ rc2 = DBGFR3StackWalkBegin(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_HYPER, &pFirstFrame);
if (RT_SUCCESS(rc2))
{
pHlp->pfnPrintf(pHlp,
@@ -591,15 +594,15 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
"%.*Rhxd\n",
pVCpu->vmm.s.pbEMTStackRC, pVCpu->vmm.s.pbEMTStackBottomRC,
VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
- } /* !HWACCMIsEnabled */
+ } /* !HMIsEnabled */
break;
}
case VERR_IEM_INSTR_NOT_IMPLEMENTED:
case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
{
- DBGFR3Info(pVM, "cpumguest", NULL, pHlp);
- DBGFR3Info(pVM, "cpumguestinstr", NULL, pHlp);
+ DBGFR3Info(pVM->pUVM, "cpumguest", NULL, pHlp);
+ DBGFR3Info(pVM->pUVM, "cpumguestinstr", NULL, pHlp);
break;
}
@@ -642,7 +645,7 @@ VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr)
"!! {%s, %s}\n"
"!!\n",
aInfo[i].pszInfo, aInfo[i].pszArgs);
- DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
+ DBGFR3Info(pVM->pUVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
}
/* All other info items */
diff --git a/src/VBox/VMM/VMMR3/VMMR3.def b/src/VBox/VMM/VMMR3/VMMR3.def
index 981c7e5b..2b894bfd 100644
--- a/src/VBox/VMM/VMMR3/VMMR3.def
+++ b/src/VBox/VMM/VMMR3/VMMR3.def
@@ -3,7 +3,7 @@
; VMM Ring-3 Context DLL - Definition file.
;
-; Copyright (C) 2010-2011 Oracle Corporation
+; Copyright (C) 2010-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
@@ -29,6 +29,7 @@ EXPORTS
CFGMR3GetValueType
CFGMR3Dump
CFGMR3CreateTree
+ CFGMR3DestroyTree
CFGMR3GetValueName
CFGMR3GetName
CFGMR3RemoveNode
@@ -91,10 +92,69 @@ EXPORTS
CFGMR3QueryStringDef
CFGMR3QueryString
CFGMR3QueryStringAlloc
+ CFGMR3GetParent
+ CFGMR3GetRootU
+
+ CSAMR3IsEnabled
+ CSAMR3SetScanningEnabled
+
+ DBGCCreate
+
+ DBGFR3CoreWrite
+ DBGFR3Info
+ DBGFR3InfoRegisterExternal
+ DBGFR3InjectNMI
+ DBGFR3LogModifyDestinations
+ DBGFR3LogModifyFlags
+ DBGFR3LogModifyGroups
+ DBGFR3OSDetect
+ DBGFR3OSQueryNameAndVersion
+ DBGFR3RegCpuQueryU32
+ DBGFR3RegFormatValue
+ DBGFR3RegNmQuery
+ DBGFR3RegNmQueryAll
+ DBGFR3RegNmQueryAllCount
+ DBGFR3OSDeregister
+ DBGFR3OSRegister
+ DBGFR3MemReadString
+ DBGFR3MemRead
+ DBGFR3MemScan
+ DBGFR3AddrFromFlat
+ DBGFR3AsSymbolByName
+ DBGFR3AsResolveAndRetain
+ DBGFR3AsSetAlias
+ DBGFR3AddrAdd
+ DBGFR3AddrSub
+ DBGFR3AsGetConfig
+ DBGFR3CpuGetMode
+ DBGFR3AddrFromSelOff
+
+ EMR3QueryExecutionPolicy
+ EMR3SetExecutionPolicy
+
+ FTMR3CancelStandby
+ FTMR3PowerOn
+
+ HMR3IsEnabled
+ HMR3IsNestedPagingActive
+ HMR3IsUXActive
+ HMR3IsVpidActive
MMR3HeapFree
MMR3HeapRealloc
+ PATMR3AllowPatching
+ PATMR3IsEnabled
+
+ PDMR3AsyncCompletionBwMgrSetMaxForFile
+ PDMR3DeviceAttach
+ PDMR3DeviceDetach
+ PDMR3DriverAttach
+ PDMR3NsBwGroupSetLimit
+ PDMR3QueryDeviceLun
+ PDMR3QueryDriverOnLun
+ PDMR3QueryLun
+
PDMCritSectEnter
PDMCritSectEnterDebug
PDMCritSectTryEnter
@@ -126,6 +186,11 @@ EXPORTS
PDMR3ThreadSleep
PDMR3ThreadSuspend
+ PDMR3UsbCreateEmulatedDevice
+ PDMR3UsbCreateProxyDevice
+ PDMR3UsbDetachDevice
+ PDMR3UsbHasHub
+
PGMHandlerPhysicalPageTempOff
PGMPhysReadGCPtr
PGMPhysSimpleDirtyWriteGCPtr
@@ -134,6 +199,8 @@ EXPORTS
PGMPhysSimpleWriteGCPtr
PGMPhysWriteGCPtr
PGMShwMakePageWritable
+ PGMR3QueryGlobalMemoryStats
+ PGMR3QueryMemoryStats
SSMR3Close
SSMR3DeregisterExternal
@@ -211,6 +278,15 @@ EXPORTS
SSMR3Skip
SSMR3SkipToEndOfUnit
SSMR3ValidateFile
+ SSMR3Cancel
+ SSMR3RegisterExternal
+
+ STAMR3Dump
+ STAMR3Enum
+ STAMR3Reset
+ STAMR3Snapshot
+ STAMR3SnapshotFree
+ STAMR3GetUnit
TMR3TimerSetCritSect
TMR3TimerLoad
@@ -241,7 +317,47 @@ EXPORTS
TMTimerToMilli
TMTimerToNano
TMTimerUnlock
+ TMR3GetWarpDrive
+ TMR3SetWarpDrive
+
+ VMMGetCpu
VMMGetSvnRev
VMSetError
VMSetErrorV
+ VMR3AtErrorDeregister
+ VMR3AtErrorRegister
+ VMR3AtRuntimeErrorRegister
+ VMR3AtStateRegister
+ VMR3Create
+ VMR3Destroy
+ VMR3GetCpuCoreAndPackageIdFromCpuId
+ VMR3GetStateName
+ VMR3GetStateU
+ VMR3GetVM
+ VMR3HotPlugCpu
+ VMR3HotUnplugCpu
+ VMR3LoadFromFile
+ VMR3LoadFromStream
+ VMR3PowerOff
+ VMR3PowerOn
+ VMR3ReleaseUVM
+ VMR3ReqCallNoWaitU
+ VMR3ReqCallU
+ VMR3ReqCallVoidWaitU
+ VMR3ReqCallWaitU
+ VMR3ReqFree
+ VMR3ReqPriorityCallWaitU
+ VMR3ReqWait
+ VMR3Reset
+ VMR3Resume
+ VMR3RetainUVM
+ VMR3Save
+ VMR3SetCpuExecutionCap
+ VMR3SetError
+ VMR3SetPowerOffInsteadOfReset
+ VMR3Suspend
+ VMR3Teleport
+ VMR3AtStateDeregister
+ VMR3GetUVM
+
diff --git a/src/VBox/VMM/VMMR3/VMMSwitcher.cpp b/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
index 301f4e42..f6aa164e 100644
--- a/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
+++ b/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -15,12 +15,14 @@
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
+
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/sup.h>
@@ -45,24 +47,24 @@
/** Array of switcher definitions.
* The type and index shall match!
*/
-static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
+static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
{
NULL, /* invalid entry */
#ifdef VBOX_WITH_RAW_MODE
# ifndef RT_ARCH_AMD64
&vmmR3Switcher32BitTo32Bit_Def,
&vmmR3Switcher32BitToPAE_Def,
- &vmmR3Switcher32BitToAMD64_Def,
+ NULL, //&vmmR3Switcher32BitToAMD64_Def,
&vmmR3SwitcherPAETo32Bit_Def,
&vmmR3SwitcherPAEToPAE_Def,
- &vmmR3SwitcherPAEToAMD64_Def,
+ NULL, //&vmmR3SwitcherPAEToAMD64_Def,
NULL, //&vmmR3SwitcherPAETo32Bit_Def,
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
&vmmR3SwitcherAMD64ToPAE_Def,
# else
NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
- NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
+ NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# else /* RT_ARCH_AMD64 */
NULL, //&vmmR3Switcher32BitTo32Bit_Def,
NULL, //&vmmR3Switcher32BitToPAE_Def,
@@ -72,7 +74,7 @@ static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
NULL, //&vmmR3SwitcherPAEToAMD64_Def,
&vmmR3SwitcherAMD64To32Bit_Def,
&vmmR3SwitcherAMD64ToPAE_Def,
- NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
+ NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# endif /* RT_ARCH_AMD64 */
#else /* !VBOX_WITH_RAW_MODE */
NULL,
@@ -83,11 +85,126 @@ static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
NULL,
NULL,
NULL,
- NULL
+ NULL,
+#endif /* !VBOX_WITH_RAW_MODE */
+#ifndef RT_ARCH_AMD64
+ &vmmR3SwitcherX86Stub_Def,
+ NULL,
+#else
+ NULL,
+ &vmmR3SwitcherAMD64Stub_Def,
+#endif
+};
+
+/** Array of switcher definitions.
+ * The type and index shall match!
+ */
+static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
+{
+ NULL, /* invalid entry */
+#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ NULL, //&vmmR3Switcher32BitTo32Bit_Def,
+ NULL, //&vmmR3Switcher32BitToPAE_Def,
+ &vmmR3Switcher32BitToAMD64_Def,
+ NULL, //&vmmR3SwitcherPAETo32Bit_Def,
+ NULL, //&vmmR3SwitcherPAEToPAE_Def,
+ &vmmR3SwitcherPAEToAMD64_Def,
+ NULL, //&vmmR3SwitcherPAETo32Bit_Def,
+ NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
+ NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
+#else /* HC_ARCH_BITS != 32 || VBOX_WITH_HYBRID_32BIT_KERNEL */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+#endif /* HC_ARCH_BITS != 32 || VBOX_WITH_HYBRID_32BIT_KERNEL */
+#ifndef RT_ARCH_AMD64
+ &vmmR3SwitcherX86Stub_Def,
+ NULL,
+#else
+ NULL,
+ &vmmR3SwitcherAMD64Stub_Def,
+#endif
};
+# ifdef VBOX_WITH_64ON32_IDT
+/**
+ * Initializes the 64-bit IDT for 64-bit guest on 32-bit host switchers.
+ *
+ * This is only used as a debugging aid when we cannot find out why something
+ * goes haywire in the intermediate context.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSwitcher The switcher descriptor.
+ * @param pbDst Where the switcher code was just copied.
+ * @param HCPhysDst The host physical address corresponding to @a pbDst.
+ */
+static void vmmR3Switcher32On64IdtInit(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
+{
+ AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode);
+ AssertRelease(pSwitcher->cbCode < _64K);
+ RTSEL uCs64 = SELMGetHyperCS64(pVM);
+
+ PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
+ for (uint32_t i = 0 ; i < 256; i++)
+ {
+ AssertRelease(((uint64_t *)&paIdt[i])[0] < pSwitcher->cbCode);
+ AssertRelease(((uint64_t *)&paIdt[i])[1] == 0);
+ uint64_t uHandler = HCPhysDst + paIdt[i].u16OffsetLow;
+ paIdt[i].u16OffsetLow = (uint16_t)uHandler;
+ paIdt[i].u16Sel = uCs64;
+ paIdt[i].u3IST = 0;
+ paIdt[i].u5Reserved = 0;
+ paIdt[i].u4Type = AMD64_SEL_TYPE_SYS_INT_GATE;
+ paIdt[i].u1DescType = 0 /* system */;
+ paIdt[i].u2Dpl = 3;
+ paIdt[i].u1Present = 1;
+ paIdt[i].u16OffsetHigh = (uint16_t)(uHandler >> 16);
+ paIdt[i].u32Reserved = (uint32_t)(uHandler >> 32);
+ }
+
+ for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ {
+ uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
+ CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
+ }
+}
+
+
+/**
+ * Relocates the 64-bit IDT for 64-bit guest on 32-bit host switchers.
+ *
+ * @param pVM The cross context VM structure.
+ * @param pSwitcher The switcher descriptor.
+ * @param pbDst Where the switcher code was just copied.
+ * @param HCPhysDst The host physical address corresponding to @a pbDst.
+ */
+static void vmmR3Switcher32On64IdtRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
+{
+ AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode && pSwitcher->cbCode < _64K);
+
+ /* The intermediate context doesn't move, but the CS may. */
+ RTSEL uCs64 = SELMGetHyperCS64(pVM);
+ PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
+ for (uint32_t i = 0 ; i < 256; i++)
+ paIdt[i].u16Sel = uCs64;
+
+ /* Just in case... */
+ for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+ {
+ uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
+ CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
+ }
+}
+# endif /* VBOX_WITH_64ON32_IDT */
+
+
/**
* VMMR3Init worker that initiates the switcher code (aka core code).
*
@@ -99,17 +216,19 @@ static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
*/
int vmmR3SwitcherInit(PVM pVM)
{
-#ifndef VBOX_WITH_RAW_MODE
+#if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
return VINF_SUCCESS;
#else
+
/*
* Calc the size.
*/
+ const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
unsigned cbCoreCode = 0;
- for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
+ for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
{
pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
if (pSwitcher)
{
AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
@@ -177,14 +296,22 @@ int vmmR3SwitcherInit(PVM pVM)
if (RT_SUCCESS(rc))
{
/*
- * copy the code.
+ * Copy the code.
*/
- for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
+ for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
{
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
if (pSwitcher)
- memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
- pSwitcher->pvCode, pSwitcher->cbCode);
+ {
+ uint8_t *pbDst = (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher];
+ memcpy(pbDst, pSwitcher->pvCode, pSwitcher->cbCode);
+# ifdef VBOX_WITH_64ON32_IDT
+ if ( pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
+ || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
+ vmmR3Switcher32On64IdtInit(pVM, pSwitcher, pbDst,
+ pVM->vmm.s.HCPhysCoreCode + pVM->vmm.s.aoffSwitchers[iSwitcher]);
+# endif
+ }
}
/*
@@ -204,6 +331,7 @@ int vmmR3SwitcherInit(PVM pVM)
* Finally, PGM probably has selected a switcher already but we need
* to get the routine addresses, so we'll reselect it.
* This may legally fail so, we're ignoring the rc.
+ * Note! See HMIsEnabled hack in selector function.
*/
VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
return rc;
@@ -233,13 +361,14 @@ int vmmR3SwitcherInit(PVM pVM)
*/
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
-#ifdef VBOX_WITH_RAW_MODE
+#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
/*
* Relocate all the switchers.
*/
- for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
+ const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
+ for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
{
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
if (pSwitcher && pSwitcher->pfnRelocate)
{
unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
@@ -249,20 +378,31 @@ void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
(uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
pVM->vmm.s.pvCoreCodeRC + off,
pVM->vmm.s.HCPhysCoreCode + off);
+# ifdef VBOX_WITH_64ON32_IDT
+ if ( pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
+ || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
+ vmmR3Switcher32On64IdtRelocate(pVM, pSwitcher,
+ (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
+ pVM->vmm.s.HCPhysCoreCode + off);
+# endif
}
}
/*
* Recalc the RC address for the current switcher.
*/
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
- RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
- pVM->vmm.s.pfnRCToHost = RCPtr + pSwitcher->offRCToHost;
- pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
- pVM->pfnVMMRCToHostAsm = RCPtr + pSwitcher->offRCToHostAsm;
- pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
-
-// AssertFailed();
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[pVM->vmm.s.enmSwitcher];
+ if (pSwitcher)
+ {
+ RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
+ pVM->vmm.s.pfnRCToHost = RCPtr + pSwitcher->offRCToHost;
+ pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
+ pVM->pfnVMMRCToHostAsm = RCPtr + pSwitcher->offRCToHostAsm;
+ pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
+ }
+ else
+ AssertRelease(HMIsEnabled(pVM));
+
#else
NOREF(pVM);
#endif
@@ -270,6 +410,8 @@ void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
}
+#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
+
/**
* Generic switcher code relocator.
*
@@ -285,7 +427,8 @@ void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
* @param GCPtrGDT The GC address of the hypervisor GDT.
* @param SelCS64 The 64-bit mode hypervisor CS selector.
*/
-static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
+static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher,
+ RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
union
@@ -618,18 +761,18 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
break;
}
-#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/*
* 64-bit HC Code Selector (no argument).
*/
case FIX_HC_64BIT_CS:
{
Assert(offSrc < pSwitcher->cbCode);
-# if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
*uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
-# else
+# else
AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
-# endif
+# endif
break;
}
@@ -642,7 +785,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
*uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
break;
}
-#endif
+# endif
/*
* 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
*/
@@ -655,7 +798,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
break;
}
-#ifdef RT_ARCH_X86
+# ifdef RT_ARCH_X86
case FIX_GC_64_BIT_CPUM_OFF:
{
uint32_t offCPUM = *u.pu32++;
@@ -663,7 +806,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
*uSrc.pu64 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
break;
}
-#endif
+# endif
/*
* 32-bit ID pointer to (ID) target within the code (32-bit offset).
@@ -704,7 +847,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
break;
}
-#ifdef VBOX_WITH_NMI
+# ifdef VBOX_WITH_NMI
/*
* 32-bit address to the APIC base.
*/
@@ -713,7 +856,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
*uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
break;
}
-#endif
+# endif
default:
AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
@@ -721,7 +864,7 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
}
}
-#ifdef LOG_ENABLED
+# ifdef LOG_ENABLED
/*
* If Log2 is enabled disassemble the switcher code.
*
@@ -856,9 +999,25 @@ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR
}
}
}
-#endif
+# endif
}
+/**
+ * Wrapper around SELMGetHyperGDT() that avoids calling it when raw-mode context
+ * is not initialized.
+ *
+ * @returns Raw-mode context GDT address. Null pointer if not applicable.
+ * @param pVM The cross context VM structure.
+ */
+static RTRCPTR vmmR3SwitcherGetHyperGDT(PVM pVM)
+{
+ if (HMIsRawModeCtxNeeded(pVM))
+ return SELMGetHyperGDT(pVM);
+# if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+ AssertFailed(); /* This path is only applicable to some 32-bit hosts. */
+# endif
+ return NIL_RTRCPTR;
+}
/**
* Relocator for the 32-Bit to 32-Bit world switcher.
@@ -886,7 +1045,7 @@ DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSw
DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
- SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
+ SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}
@@ -915,7 +1074,7 @@ DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwit
DECLCALLBACK(void) vmmR3SwitcherPAEToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
- SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
+ SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), vmmR3SwitcherGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}
@@ -959,14 +1118,17 @@ VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
return VERR_INVALID_PARAMETER;
}
- /* Do nothing if the switcher is disabled. */
- if (pVM->vmm.s.fSwitcherDisabled)
- return VINF_SUCCESS;
+ /*
+ * Override it if HM is active.
+ */
+ if (HMIsEnabled(pVM))
+ pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;
/*
* Select the new switcher.
*/
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
+ const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
if (pSwitcher)
{
Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
@@ -986,25 +1148,7 @@ VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
return VERR_NOT_IMPLEMENTED;
}
-
-/**
- * Disable the switcher logic permanently.
- *
- * @returns VBox status code.
- * @param pVM Pointer to the VM.
- */
-VMMR3_INT_DECL(int) VMMR3DisableSwitcher(PVM pVM)
-{
-/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
- * @code
- * mov eax, VERR_VMM_DUMMY_SWITCHER
- * ret
- * @endcode
- * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
- */
- pVM->vmm.s.fSwitcherDisabled = true;
- return VINF_SUCCESS;
-}
+#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
/**
@@ -1019,20 +1163,21 @@ VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwit
/*
* Validate input.
*/
- if ( enmSwitcher < VMMSWITCHER_INVALID
- || enmSwitcher >= VMMSWITCHER_MAX)
- {
- AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
- return NIL_RTR0PTR;
- }
+ AssertMsgReturn( enmSwitcher == VMMSWITCHER_32_TO_AMD64
+ || enmSwitcher == VMMSWITCHER_PAE_TO_AMD64,
+ ("%d\n", enmSwitcher),
+ NIL_RTR0PTR);
+ AssertReturn(HMIsEnabled(pVM), NIL_RTR0PTR);
/*
* Select the new switcher.
*/
- PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
+ const PVMMSWITCHERDEF *papSwitchers = g_apHmSwitchers;
+ PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
if (pSwitcher)
{
- RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
+ /** @todo fix the pvCoreCodeR0 type */
+ RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher];
return pbCodeR0 + pSwitcher->offR0ToRawMode;
}
return NIL_RTR0PTR;
diff --git a/src/VBox/VMM/VMMR3/VMMTests.cpp b/src/VBox/VMM/VMMR3/VMMTests.cpp
index 35616107..066c3c31 100644
--- a/src/VBox/VMM/VMMR3/VMMTests.cpp
+++ b/src/VBox/VMM/VMMR3/VMMTests.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -26,6 +26,7 @@
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/cpum.h>
#include <VBox/dbg.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/selm.h>
@@ -33,7 +34,6 @@
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
-#include <VBox/vmm/hwaccm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
@@ -50,6 +50,139 @@ static void vmmR3TestClearStack(PVMCPU pVCpu)
}
+#ifdef VBOX_WITH_RAW_MODE
+
+static int vmmR3ReportMsrRange(PVM pVM, uint32_t uMsr, uint64_t cMsrs, PRTSTREAM pReportStrm, uint32_t *pcMsrsFound)
+{
+ /*
+ * Preps.
+ */
+ RTRCPTR RCPtrEP;
+ int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMRCTestReadMsrs", &RCPtrEP);
+ AssertMsgRCReturn(rc, ("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc), rc);
+
+ uint32_t const cMsrsPerCall = 16384;
+ uint32_t cbResults = cMsrsPerCall * sizeof(VMMTESTMSRENTRY);
+ PVMMTESTMSRENTRY paResults;
+ rc = MMHyperAlloc(pVM, cbResults, 0, MM_TAG_VMM, (void **)&paResults);
+ AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", cbResults, rc), rc);
+ /*
+ * The loop.
+ */
+ RTRCPTR RCPtrResults = MMHyperR3ToRC(pVM, paResults);
+ uint32_t cMsrsFound = 0;
+ uint32_t uLastMsr = uMsr;
+ uint64_t uNsTsStart = RTTimeNanoTS();
+
+ for (;;)
+ {
+ if ( pReportStrm
+ && uMsr - uLastMsr > _64K
+ && (uMsr & (_4M - 1)) == 0)
+ {
+ if (uMsr - uLastMsr < 16U*_1M)
+ RTStrmFlush(pReportStrm);
+ RTPrintf("... %#010x [%u ns/msr] ...\n", uMsr, (RTTimeNanoTS() - uNsTsStart) / uMsr);
+ }
+
+ /*RT_BZERO(paResults, cbResults);*/
+ uint32_t const cBatch = RT_MIN(cMsrsPerCall, cMsrs);
+ rc = VMMR3CallRC(pVM, RCPtrEP, 4, pVM->pVMRC, uMsr, cBatch, RCPtrResults);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("VMM: VMMR3CallRC failed rc=%Rrc, uMsr=%#x\n", rc, uMsr);
+ break;
+ }
+
+ for (uint32_t i = 0; i < cBatch; i++)
+ if (paResults[i].uMsr != UINT64_MAX)
+ {
+ if (paResults[i].uValue == 0)
+ {
+ if (pReportStrm)
+ RTStrmPrintf(pReportStrm,
+ " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
+ RTPrintf("%#010llx = 0\n", paResults[i].uMsr);
+ }
+ else
+ {
+ if (pReportStrm)
+ RTStrmPrintf(pReportStrm,
+ " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
+ RTPrintf("%#010llx = %#010x`%08x\n", paResults[i].uMsr,
+ (uint32_t)(paResults[i].uValue >> 32), (uint32_t)paResults[i].uValue);
+ }
+ cMsrsFound++;
+ uLastMsr = paResults[i].uMsr;
+ }
+
+ /* Advance. */
+ if (cMsrs <= cMsrsPerCall)
+ break;
+ cMsrs -= cMsrsPerCall;
+ uMsr += cMsrsPerCall;
+ }
+
+ *pcMsrsFound += cMsrsFound;
+ MMHyperFree(pVM, paResults);
+ return rc;
+}
+
+
+/**
+ * Produces a quick report of MSRs.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the cross context VM structure.
+ * @param pReportStrm Pointer to the report output stream. Optional.
+ * @param fWithCpuId Whether CPUID should be included.
+ */
+static int vmmR3DoMsrQuickReport(PVM pVM, PRTSTREAM pReportStrm, bool fWithCpuId)
+{
+ uint64_t uTsStart = RTTimeNanoTS();
+ RTPrintf("=== MSR Quick Report Start ===\n");
+ RTStrmFlush(g_pStdOut);
+ if (fWithCpuId)
+ {
+ DBGFR3InfoStdErr(pVM->pUVM, "cpuid", "verbose");
+ RTPrintf("\n");
+ }
+ if (pReportStrm)
+ RTStrmPrintf(pReportStrm, "\n\n{\n");
+
+ static struct { uint32_t uFirst, cMsrs; } const s_aRanges[] =
+ {
+ { 0x00000000, 0x00042000 },
+ { 0x10000000, 0x00001000 },
+ { 0x20000000, 0x00001000 },
+ { 0x40000000, 0x00012000 },
+ { 0x80000000, 0x00012000 },
+// Need 0xc0000000..0xc001106f (at least), but trouble on solaris w/ 10h and 0fh family cpus:
+// { 0xc0000000, 0x00022000 },
+ { 0xc0000000, 0x00010000 },
+ { 0xc0010000, 0x00001040 },
+ { 0xc0011040, 0x00004040 }, /* should cause trouble... */
+ };
+ uint32_t cMsrsFound = 0;
+ int rc = VINF_SUCCESS;
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aRanges) && RT_SUCCESS(rc); i++)
+ {
+//if (i >= 3)
+//{
+//RTStrmFlush(g_pStdOut);
+//RTThreadSleep(40);
+//}
+ rc = vmmR3ReportMsrRange(pVM, s_aRanges[i].uFirst, s_aRanges[i].cMsrs, pReportStrm, &cMsrsFound);
+ }
+
+ if (pReportStrm)
+ RTStrmPrintf(pReportStrm, "}; /* %u (%#x) MSRs; rc=%Rrc */\n", cMsrsFound, cMsrsFound, rc);
+ RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
+ RTPrintf("=== MSR Quick Report End (rc=%Rrc, %'llu ns) ===\n", rc, RTTimeNanoTS() - uTsStart);
+ return rc;
+}
+
+
/**
* Performs a testcase.
*
@@ -67,6 +200,7 @@ static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariatio
if (RT_FAILURE(rc))
return rc;
+ Log(("vmmR3DoGCTest: %d %#x\n", enmTestcase, uVariation));
CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
vmmR3TestClearStack(pVCpu);
CPUMPushHyper(pVCpu, uVariation);
@@ -76,6 +210,23 @@ static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariatio
CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
+
+#if 1
+ /* flush the raw-mode logs. */
+# ifdef LOG_ENABLED
+ PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
+ if ( pLogger
+ && pLogger->offScratch > 0)
+ RTLogFlushRC(NULL, pLogger);
+# endif
+# ifdef VBOX_WITH_RC_RELEASE_LOGGING
+ PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
+ if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
+ RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
+# endif
+#endif
+
+ Log(("vmmR3DoGCTest: rc=%Rrc iLastGZRc=%Rrc\n", rc, pVCpu->vmm.s.iLastGZRc));
if (RT_LIKELY(rc == VINF_SUCCESS))
rc = pVCpu->vmm.s.iLastGZRc;
return rc;
@@ -176,23 +327,28 @@ static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcE
return rc;
}
+#endif /* VBOX_WITH_RAW_MODE */
+
/* execute the switch. */
VMMR3DECL(int) VMMDoTest(PVM pVM)
{
-#if 1
+ int rc = VINF_SUCCESS;
+
+#ifdef VBOX_WITH_RAW_MODE
PVMCPU pVCpu = &pVM->aCpus[0];
+ PUVM pUVM = pVM->pUVM;
-#ifdef NO_SUPCALLR0VMM
+# ifdef NO_SUPCALLR0VMM
RTPrintf("NO_SUPCALLR0VMM\n");
- return VINF_SUCCESS;
-#endif
+ return rc;
+# endif
/*
* Setup stack for calling VMMGCEntry().
*/
RTRCPTR RCPtrEP;
- int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
+ rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
if (RT_SUCCESS(rc))
{
RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);
@@ -203,20 +359,20 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");
-#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
+# if 0//defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
bool f;
rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
-#if !defined(DEBUG_bird)
+# if !defined(DEBUG_bird)
if (RT_SUCCESS(rc) && f)
-#endif
+# endif
{
/* see triple fault warnings in SELM and VMMGC.cpp. */
vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
}
-#endif
+# endif
vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
///@todo find a better \#GP case, on intel ltr will \#PF (busy update?) and not \#GP.
@@ -238,34 +394,34 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_SUCCESS)
{
RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
/* a harmless breakpoint */
RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
DBGFADDRESS Addr;
- DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
+ DBGFR3AddrFromFlat(pUVM, &Addr, 0x10000);
RTUINT iBp0;
- rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
+ rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
AssertReleaseRC(rc);
rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
if (rc != VINF_SUCCESS)
{
RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
/* a bad one at VMMGCEntry */
RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
- DBGFR3AddrFromFlat(pVM, &Addr, RCPtrEP);
+ DBGFR3AddrFromFlat(pUVM, &Addr, RCPtrEP);
RTUINT iBp1;
- rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
+ rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
AssertReleaseRC(rc);
rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
{
RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
/* resume the breakpoint */
@@ -275,7 +431,7 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_SUCCESS)
{
RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
/* engage the breakpoint again and try single stepping. */
@@ -284,7 +440,7 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
{
RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
@@ -297,7 +453,7 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_EM_DBG_HYPER_STEPPED)
{
RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
RTPrintf("%RGr=>", Pc);
@@ -311,8 +467,8 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
RTPrintf("ok\n");
/* done, clear it */
- if ( RT_FAILURE(DBGFR3BpClear(pVM, iBp0))
- || RT_FAILURE(DBGFR3BpClear(pVM, iBp1)))
+ if ( RT_FAILURE(DBGFR3BpClear(pUVM, iBp0))
+ || RT_FAILURE(DBGFR3BpClear(pUVM, iBp1)))
{
RTPrintf("VMM: Failed to clear breakpoints!\n");
return VERR_GENERAL_FAILURE;
@@ -321,11 +477,11 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_SUCCESS)
{
RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
/*
- * Interrupt masking.
+ * Interrupt masking. Failure may indicate NMI watchdog activity.
*/
RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
for (i = 0; i < 10000; i++)
@@ -335,7 +491,7 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
if (rc != VINF_SUCCESS)
{
RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
- return rc;
+ return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
}
uint64_t Ticks = ASMReadTSC() - StartTick;
if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
@@ -442,6 +598,13 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
rc = VINF_SUCCESS;
+
+#if 0 /* drop this for now as it causes trouble on AMDs (Opteron 2384 and possibly others). */
+ /*
+ * A quick MSR report.
+ */
+ vmmR3DoMsrQuickReport(pVM, NULL, true);
+#endif
}
else
AssertMsgFailed(("Failed to resolved VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));
@@ -468,7 +631,7 @@ VMMR3DECL(int) VMMDoTest(PVM pVM)
}
/* execute the switch. */
-VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
+VMMR3DECL(int) VMMDoHmTest(PVM pVM)
{
uint32_t i;
int rc;
@@ -476,17 +639,19 @@ VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
RTGCPHYS CR3Phys = 0x0; /* fake address */
PVMCPU pVCpu = &pVM->aCpus[0];
- if (!HWACCMR3IsAllowed(pVM))
+ if (!HMIsEnabled(pVM))
{
RTPrintf("VMM: Hardware accelerated test not available!\n");
return VERR_ACCESS_DENIED;
}
+#ifdef VBOX_WITH_RAW_MODE
/*
* These forced actions are not necessary for the test and trigger breakpoints too.
*/
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+#endif
/* Enable mapping of the hypervisor into the shadow page table. */
uint32_t cb;
@@ -541,7 +706,7 @@ VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
{
CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
CPUMPushHyper(pVCpu, 0);
- CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
+ CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HM_NOP);
CPUMPushHyper(pVCpu, pVM->pVMRC);
CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
@@ -558,7 +723,7 @@ VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
uint64_t TickThisStart = ASMReadTSC();
- rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, 0);
+ rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
if (RT_FAILURE(rc))
{
@@ -590,3 +755,199 @@ VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
return rc;
}
+
+#ifdef VBOX_WITH_RAW_MODE
+
+/**
+ * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
+ * prefix to the MSR report.
+ */
+static DECLCALLBACK(void) vmmDoPrintfVToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list va)
+{
+ PRTSTREAM pOutStrm = ((PRTSTREAM *)pHlp)[-1];
+ RTStrmPrintfV(pOutStrm, pszFormat, va);
+}
+
+/**
+ * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
+ * prefix to the MSR report.
+ */
+static DECLCALLBACK(void) vmmDoPrintfToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ vmmDoPrintfVToStream(pHlp, pszFormat, va);
+ va_end(va);
+}
+
+#endif
+
+
+/**
+ * Uses raw-mode to query all possible MSRs on the real hardware.
+ *
+ * This generates a msr-report.txt file (appending, no overwriting) as well as
+ * writing the values and process to stdout.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ */
+VMMR3DECL(int) VMMDoBruteForceMsrs(PVM pVM)
+{
+#ifdef VBOX_WITH_RAW_MODE
+ PRTSTREAM pOutStrm;
+ int rc = RTStrmOpen("msr-report.txt", "a", &pOutStrm);
+ if (RT_SUCCESS(rc))
+ {
+ /* Header */
+ struct
+ {
+ PRTSTREAM pOutStrm;
+ DBGFINFOHLP Hlp;
+ } MyHlp = { pOutStrm, { vmmDoPrintfToStream, vmmDoPrintfVToStream } };
+ DBGFR3Info(pVM->pUVM, "cpuid", "verbose", &MyHlp.Hlp);
+ RTStrmPrintf(pOutStrm, "\n");
+
+ uint32_t cMsrsFound = 0;
+ vmmR3ReportMsrRange(pVM, 0, _4G, pOutStrm, &cMsrsFound);
+
+ RTStrmPrintf(pOutStrm, "Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
+ RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
+
+ RTStrmClose(pOutStrm);
+ }
+ return rc;
+#else
+ return VERR_NOT_SUPPORTED;
+#endif
+}
+
+
+/**
+ * Uses raw-mode to query all known MSRs on the real hardware.
+ *
+ * This generates a known-msr-report.txt file (appending, no overwriting) as
+ * well as writing the values and process to stdout.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ */
+VMMR3DECL(int) VMMDoKnownMsrs(PVM pVM)
+{
+#ifdef VBOX_WITH_RAW_MODE
+ PRTSTREAM pOutStrm;
+ int rc = RTStrmOpen("known-msr-report.txt", "a", &pOutStrm);
+ if (RT_SUCCESS(rc))
+ {
+ vmmR3DoMsrQuickReport(pVM, pOutStrm, false);
+ RTStrmClose(pOutStrm);
+ }
+ return rc;
+#else
+ return VERR_NOT_SUPPORTED;
+#endif
+}
+
+
+/**
+ * MSR experimentation.
+ *
+ * @returns VBox status code.
+ * @param pVM The VM handle.
+ */
+VMMR3DECL(int) VMMDoMsrExperiments(PVM pVM)
+{
+#ifdef VBOX_WITH_RAW_MODE
+ /*
+ * Preps.
+ */
+ RTRCPTR RCPtrEP;
+ int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMRCTestTestWriteMsr", &RCPtrEP);
+ AssertMsgRCReturn(rc, ("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc), rc);
+
+ uint64_t *pauValues;
+ rc = MMHyperAlloc(pVM, 2 * sizeof(uint64_t), 0, MM_TAG_VMM, (void **)&pauValues);
+ AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", 2 * sizeof(uint64_t), rc), rc);
+ RTRCPTR RCPtrValues = MMHyperR3ToRC(pVM, pauValues);
+
+ /*
+ * Do the experiments.
+ */
+ uint32_t uMsr = 0x00000277;
+ uint64_t uValue = UINT64_C(0x0007010600070106);
+#if 0
+ uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
+ uValue |= RT_BIT_64(13);
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc);
+#elif 1
+ const uint64_t uOrgValue = uValue;
+ uint32_t cChanges = 0;
+ for (int iBit = 63; iBit >= 58; iBit--)
+ {
+ uValue = uOrgValue & ~RT_BIT_64(iBit);
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
+ (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
+ cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
+
+ uValue = uOrgValue | RT_BIT_64(iBit);
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset bit=%u -> %s\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
+ (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
+ cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
+ }
+ RTPrintf("%u change(s)\n", cChanges);
+#else
+ uint64_t fWriteable = 0;
+ for (uint32_t i = 0; i <= 63; i++)
+ {
+ uValue = RT_BIT_64(i);
+# if 0
+ if (uValue & (0x7))
+ continue;
+# endif
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc);
+ if (RT_SUCCESS(rc))
+ fWriteable |= RT_BIT_64(i);
+ }
+
+ uValue = 0;
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc);
+
+ uValue = UINT64_MAX;
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc);
+
+ uValue = fWriteable;
+ rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+ RCPtrValues, RCPtrValues + sizeof(uint64_t));
+ RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
+ uMsr, pauValues[0], uValue, pauValues[1], rc);
+
+#endif
+
+ /*
+ * Cleanups.
+ */
+ MMHyperFree(pVM, pauValues);
+ return rc;
+#else
+ return VERR_NOT_SUPPORTED;
+#endif
+}
+
diff --git a/src/VBox/VMM/VMMR3/VMReq.cpp b/src/VBox/VMM/VMMR3/VMReq.cpp
index 11ac3ead..ec3ab798 100644
--- a/src/VBox/VMM/VMMR3/VMReq.cpp
+++ b/src/VBox/VMM/VMMR3/VMReq.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -44,40 +44,38 @@ static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
/**
- * Allocate and queue a call request.
+ * Convenience wrapper for VMR3ReqCallU.
*
- * If it's desired to poll on the completion of the request set cMillies
- * to 0 and use VMR3ReqWait() to check for completion. In the other case
- * use RT_INDEFINITE_WAIT.
- * The returned request packet must be freed using VMR3ReqFree().
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, and (3) that you wish to wait
+ * forever for it to return.
*
- * @returns VBox status code.
- * Will not return VERR_INTERRUPTED.
- * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ * its status code is returned. Otherwise, the status of pfnFunction is
+ * returned.
*
* @param pVM Pointer to the VM.
* @param idDstCpu The destination CPU(s). Either a specific CPU ID or
* one of the following special values:
* VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
- * @param ppReq Where to store the pointer to the request.
- * This will be NULL or a valid request pointer not matter what happens.
- * @param cMillies Number of milliseconds to wait for the request to
- * be completed. Use RT_INDEFINITE_WAIT to only
- * wait till it's completed.
- * @param fFlags A combination of the VMREQFLAGS values.
* @param pfnFunction Pointer to the function to call.
* @param cArgs Number of arguments following in the ellipsis.
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
-VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
- PFNRT pfnFunction, unsigned cArgs, ...)
+VMMR3_INT_DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
+ PVMREQ pReq;
va_list va;
va_start(va, cArgs);
- int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
+ int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
+ pfnFunction, cArgs, va);
va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
return rc;
}
@@ -102,13 +100,14 @@ VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVA
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
-VMMR3DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
PVMREQ pReq;
va_list va;
va_start(va, cArgs);
- int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
pfnFunction, cArgs, va);
va_end(va);
if (RT_SUCCESS(rc))
@@ -135,6 +134,7 @@ VMMR3DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, uns
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
@@ -150,6 +150,35 @@ VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, u
/**
* Convenience wrapper for VMR3ReqCallU.
*
+ * This assumes (1) you're calling a function that returns a VBox status code
+ * and that you do not wish to wait for it to complete.
+ *
+ * @returns VBox status code returned by VMR3ReqCallVU.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
* This assumes (1) you're calling a function that returns void, and (2) that
* you wish to wait for ever for it to return.
*
@@ -164,8 +193,9 @@ VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, u
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
-VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+VMMR3_INT_DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
PVMREQ pReq;
va_list va;
@@ -182,6 +212,37 @@ VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction,
* Convenience wrapper for VMR3ReqCallU.
*
* This assumes (1) you're calling a function that returns void, and (2) that
+ * you wish to wait forever for it to return.
+ *
+ * @returns VBox status code of VMR3ReqCallVU.
+ *
+ * @param pUVM Pointer to the user mode VM structure.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
+ * This assumes (1) you're calling a function that returns void, and (2) that
* you do not wish to wait for it to complete.
*
* @returns VBox status code of VMR3ReqCallVU.
@@ -195,6 +256,7 @@ VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction,
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
@@ -230,6 +292,7 @@ VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunctio
* @param ... Function arguments.
*
* @remarks See remarks on VMR3ReqCallVU.
+ * @internal
*/
VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
@@ -249,13 +312,50 @@ VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunct
/**
* Convenience wrapper for VMR3ReqCallU.
*
+ * This assumes (1) you're calling a function that returns a VBox status code,
+ * (2) that you want its return code on success, (3) that you wish to wait
+ * forever for it to return, and (4) that it's a priority request that can
+ * safely be handled during async suspend and power off.
+ *
+ * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
+ * its status code is returned. Otherwise, the status of pfnFunction is
+ * returned.
+ *
+ * @param pUVM The user mode VM handle.
+ * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
+ * one of the following special values:
+ * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
+ * @param pfnFunction Pointer to the function to call.
+ * @param cArgs Number of arguments following in the ellipsis.
+ * @param ... Function arguments.
+ *
+ * @remarks See remarks on VMR3ReqCallVU.
+ */
+VMMR3DECL(int) VMR3ReqPriorityCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+{
+ PVMREQ pReq;
+ va_list va;
+ va_start(va, cArgs);
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
+ pfnFunction, cArgs, va);
+ va_end(va);
+ if (RT_SUCCESS(rc))
+ rc = pReq->iStatus;
+ VMR3ReqFree(pReq);
+ return rc;
+}
+
+
+/**
+ * Convenience wrapper for VMR3ReqCallU.
+ *
* This assumes (1) you're calling a function that returns void, (2) that you
+ * wish to wait forever for it to return, and (3) that it's a priority request
+ * that can safely be handled during async suspend and power off.
*
* @returns VBox status code of VMR3ReqCallVU.
*
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param idDstCpu The destination CPU(s). Either a specific CPU ID or
* one of the following special values:
* VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
@@ -265,12 +365,12 @@ VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunct
*
* @remarks See remarks on VMR3ReqCallVU.
*/
-VMMR3DECL(int) VMR3ReqPriorityCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
+VMMR3DECL(int) VMR3ReqPriorityCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
{
PVMREQ pReq;
va_list va;
va_start(va, cArgs);
- int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
+ int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
pfnFunction, cArgs, va);
va_end(va);
VMR3ReqFree(pReq);
@@ -364,7 +464,7 @@ VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINT
* Validate input.
*/
AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
- AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)), VERR_INVALID_PARAMETER);
if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
{
@@ -379,7 +479,7 @@ VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINT
/*
* Allocate request
*/
- int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
+ int rc = VMR3ReqAlloc(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
if (RT_FAILURE(rc))
return rc;
@@ -477,27 +577,6 @@ static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
*
* @returns VBox status code.
*
- * @param pVM Pointer to the VM.
- * @param ppReq Where to store the pointer to the allocated packet.
- * @param enmType Package type.
- * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
- * one of the following special values:
- * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
- */
-VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
-{
- return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, idDstCpu);
-}
-
-
-/**
- * Allocates a request packet.
- *
- * The caller allocates a request packet, fills in the request data
- * union and queues the request.
- *
- * @returns VBox status code.
- *
* @param pUVM Pointer to the user mode VM structure.
* @param ppReq Where to store the pointer to the allocated packet.
* @param enmType Package type.
@@ -505,7 +584,7 @@ VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID i
* one of the following special values:
* VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
*/
-VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
+VMMR3DECL(int) VMR3ReqAlloc(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
{
/*
* Validate input.
@@ -1020,8 +1099,9 @@ static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ p
*
* @remarks This was made reentrant for async PDM handling, the debugger and
* others.
+ * @internal
*/
-VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
+VMMR3_INT_DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
{
LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h
new file mode 100644
index 00000000..774e2ae4
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_3200.h
@@ -0,0 +1,220 @@
+/* $Id: AMD_Athlon_64_3200.h $ */
+/** @file
+ * CPU database entry "AMD Athlon 64 3200+".
+ * Generated at 2013-07-12T02:09:05Z by VBoxCpuReport v4.3.53r91376 on win.x86.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_AMD_Athlon_64_3200
+#define VBOX_CPUDB_AMD_Athlon_64_3200
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Athlon_64_3200[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000f48, 0x00000800, 0x00000000, 0x078bfbff, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000018, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000f48, 0x0000010a, 0x00000000, 0xe1d3fbff, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6c687441, 0x74286e6f, 0x3620296d, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x72502034, 0x7365636f, 0x20726f73, 0x30303233, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x0000002b, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x42004200, 0x04008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003028, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8fffffff, 0x00000000, 0x00000000, 0x53275449, 0x4d414820, 0x2052454d, 0x454d4954, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Athlon_64_3200[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x28`4505cb65 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xffffff00000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x39 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8059e000 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x81872950 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x105, 0, 0), /* value=0x105 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffed`bf1be178 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffff7f49`bf1bedec */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0xf8000001 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`fc000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x800, 0xfe, UINT64_C(0xfffffffffffff200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x0 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0x0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x81913800 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0x0 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xffffffff00200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x160601, UINT64_C(0xffffffffffc0f800), 0), /* value=0x160601 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0xc000000, UINT64_C(0xffffffff3ff00000), 0), /* value=0xc000000 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffffff0000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffffff0000000fe7), 0), /* value=0xf8000018 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffffff00000007ff), 0), /* value=0xff`fc000800 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffffff00007fffff)), /* value=0x40000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffffff00007fffff), 0), /* value=0x0 */
+ MVI(0xc001001e, "AMD_K8_MANID", 0x20),
+ MFX(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1, 0, UINT64_C(0xffffff0000000000), 0), /* value=0x11`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MVX(0xc0010021, "AMD_K8_UNK_c001_0021", 0, UINT64_C(0xfffffffe00000000), 0),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, UINT64_C(0xfffffffeffffffff), 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_MAX, 0), /* value=0x0 */
+ MFI(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl), /* value=0x0 */
+ MFX(0xc0010041, "AMD_K8_FIDVID_CTL", AmdK8FidVidControl, AmdK8FidVidControl, UINT64_C(0x4e200000000c), 0x33, UINT64_C(0xfff00000fffee0c0)), /* value=0x4e20`0000000c */
+ MFX(0xc0010042, "AMD_K8_FIDVID_STATUS", AmdK8FidVidStatus, ReadOnly, UINT64_C(0x200000c0c0c), 0, 0), /* value=0x200`000c0c0c */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x521020),
+ RFN(0xc0010044, 0xc0010048, "AMD_K8_MC_CTL_MASK_n", AmdK8McCtlMaskN, AmdK8McCtlMaskN),
+ RSN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN, 0x0, 0, UINT64_C(0x1f00000000000000)),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, 0, UINT64_C(0xffffffffffff1f00)), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98000 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffffff000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffffff00000188c0), 0), /* value=0x1 */
+ MVX(0xc0010114, "AMD_K8_UNK_c001_0114", 0, 0, UINT64_C(0xffffffffffffffe4)),
+ MVX(0xc0010115, "AMD_K8_UNK_c001_0115", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0xc0010116, "AMD_K8_UNK_c001_0116", 0, 0, UINT64_C(0xffff0000ffff0000)),
+ MVX(0xc0010117, "AMD_K8_UNK_c001_0117", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0xc0010118, "AMD_K8_UNK_c001_0118",0,0,0),
+ MVX(0xc0010119, "AMD_K8_UNK_c001_0119",0,0,0),
+ MVX(0xc001011a, "AMD_K8_UNK_c001_011a", 0, 0, UINT64_C(0xffffffff00000fff)),
+ MVX(0xc001011b, "AMD_K8_UNK_c001_011b", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0xc001011c, "AMD_K8_UNK_c001_011c", UINT32_C(0xdb1f5000), 0, UINT64_C(0xffffffff00000fff)),
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x78bfbff */
+ MFX(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xf1f3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x20906 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0x10a, 0, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0xc0011011`00000283 */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, UINT64_C(0xfffffffffc000000), 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x521020), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x1100000008), UINT64_C(0xffffff0000000000), 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1000 */
+ MFX(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, 0x800, ~(uint64_t)UINT32_MAX, 0), /* value=0x800 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x24000008 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x2020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD Athlon(tm) 64 Processor 3200+.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Athlon_64_3200 =
+{
+ /*.pszName = */ "AMD Athlon 64 3200+",
+ /*.pszFullName = */ "AMD Athlon(tm) 64 Processor 3200+",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 8,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K8_130nm,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 40,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Athlon_64_3200),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Athlon_64_3200)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Athlon_64_3200)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Athlon_64_3200),
+};
+
+#endif /* !VBOX_DB_AMD_Athlon_64_3200 */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h
new file mode 100644
index 00000000..0c5557b7
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Athlon_64_X2_Dual_Core_4200.h
@@ -0,0 +1,188 @@
+/* $Id: AMD_Athlon_64_X2_Dual_Core_4200.h $ */
+/** @file
+ * CPU database entry "AMD Athlon 64 X2 Dual Core 4200+".
+ * Generated at 2014-02-28T15:19:16Z by VBoxCpuReport v4.3.53r92578 on linux.amd64 .
+ * .
+ * @remarks Possible that we're missing a few special MSRs due to no .
+ * magic register value capabilities in the linux hosted .
+ * MSR probing code.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
+#define VBOX_CPUDB_AMD_Athlon_64_X2_Dual_Core_4200
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00040fb2, 0x01020800, 0x00002001, 0x178bfbff, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000018, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00040fb2, 0x000008d1, 0x0000001f, 0xebd3fbff, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6c687441, 0x74286e6f, 0x3620296d, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x32582034, 0x61754420, 0x6f43206c, 0x50206572, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x65636f72, 0x726f7373, 0x30323420, 0x00002b30, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x42004200, 0x02008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000003f, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003028, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x00000000, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x7e`171166b8 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffff00000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFO(0x0000008b, "AMD_K8_PATCH_LEVEL", AmdK8PatchLevel), /* value=0x0 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8103ca80 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x105, 0, 0), /* value=0x105 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffffff`a0425995 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`8103124a */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0xf8000001 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0xff`ff000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`81011d20 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8103ccb0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x1da4880 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`28300000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xffffffff00200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760601, UINT64_C(0xffffffffff80f800), 0), /* value=0x760601 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x2000060, UINT64_C(0xffffffff3ff00020), 0), /* value=0x2000060 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffffff0000000fe7)), /* value=0xa30000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffffff0000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffffff00000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffffff00007fffff)), /* value=0xc0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffffff00007fffff), 0), /* value=0x1`40000000 */
+ MVI(0xc001001e, "AMD_K8_MANID", 0x52),
+ MFX(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1, 0, UINT64_C(0x3fbf000000000000), 0), /* value=0x400001`00100008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFN(0xc0010021, "AMD_K8_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0, UINT64_C(0xfffffffff0e088fc), 0), /* value=0x0 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xfffffffff0e088e0), 0), /* value=0x0 */
+ MFX(0xc0010041, "AMD_K8_FIDVID_CTL", AmdK8FidVidControl, AmdK8FidVidControl, UINT64_C(0x100001202), 0xc31, UINT64_C(0xfff00000fffec0c0)), /* value=0x1`00001202 */
+ MFX(0xc0010042, "AMD_K8_FIDVID_STATUS", AmdK8FidVidStatus, ReadOnly, UINT64_C(0x310c12120c0e0202), 0, 0), /* value=0x310c1212`0c0e0202 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x4e1a24),
+ RFN(0xc0010044, 0xc0010048, "AMD_K8_MC_CTL_MASK_n", AmdK8McCtlMaskN, AmdK8McCtlMaskN),
+ RSN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN, 0x0, 0, UINT64_C(0x1f00000000000000)),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffff1f00)), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xe0000000)), /* value=0x3000000 */
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98200 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffffff000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffffff00000188c0), 0), /* value=0x1 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x0 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFN(0xc0010116, "AMD_K8_SMM_CTL", WriteOnly, AmdK8SmmCtl),
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffffff0000000fff)), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD Athlon(tm) 64 X2 Dual Core Processor 4200+.
+ */
+static CPUMDBENTRY const g_Entry_AMD_Athlon_64_X2_Dual_Core_4200 =
+{
+ /*.pszName = */ "AMD Athlon 64 X2 Dual Core 4200+",
+ /*.pszFullName = */ "AMD Athlon(tm) 64 X2 Dual Core Processor 4200+",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 75,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K8_90nm_AMDV,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 40,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Athlon_64_X2_Dual_Core_4200)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Athlon_64_X2_Dual_Core_4200),
+};
+
+#endif /* !VBOX_DB_AMD_Athlon_64_X2_Dual_Core_4200 */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h b/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h
new file mode 100644
index 00000000..f7982073
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_FX_8150_Eight_Core.h
@@ -0,0 +1,379 @@
+/* $Id: AMD_FX_8150_Eight_Core.h $ */
+/** @file
+ * CPU database entry "AMD FX-8150 Eight-Core".
+ * Generated at 2013-12-09T11:27:04Z by VBoxCpuReport v4.3.51r91084 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_AMD_FX_8150_Eight_Core
+#define VBOX_CPUDB_AMD_FX_8150_Eight_Core
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_FX_8150_Eight_Core[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00600f12, 0x02080800, 0x1e98220b, 0x178bfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x000003c0, 0x40000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001e, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00600f12, 0x10000000, 0x01c9bfff, 0x2fd3fbff, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x74285846, 0x382d296d, 0x20303531, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x68676945, 0x6f432d74, 0x50206572, 0x65636f72, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x726f7373, 0x20202020, 0x20202020, 0x00202020, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff20ff18, 0xff20ff30, 0x10040140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x64000000, 0x64004200, 0x08008140, 0x0040c140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000003d9, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00004007, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00010000, 0x00000000, 0x000014ff, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf020f018, 0x64000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x000000ff, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001c, 0x00000000, 0x00000000, 0x00000000, 0x80032013, 0x00010200, 0x8000000f, 0 },
+ { 0x8000001d, 0x00000000, UINT32_MAX, 0x00000121, 0x00c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x8000001d, 0x00000001, UINT32_MAX, 0x00004122, 0x0040003f, 0x000001ff, 0x00000000, 0 },
+ { 0x8000001d, 0x00000002, UINT32_MAX, 0x00004143, 0x03c0003f, 0x000007ff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000003, UINT32_MAX, 0x0001c163, 0x0fc0003f, 0x000007ff, 0x00000001, 0 },
+ { 0x8000001d, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001e, 0x00000000, 0x00000000, 0x00000012, 0x00000101, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_FX_8150_Eight_Core[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x6000626),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf),
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x107, 0, 0), /* value=0x107 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffff88), 0), /* value=0x77 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x0 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0x0 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0xcdf00000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fff00800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0xce000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fe000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x0000041b, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x4d01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`02ed0bc0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`02ed0900 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`02f65000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0000104, "AMD_15H_TSC_RATE", AmdFam15hTscRate, AmdFam15hTscRate, 0, 0, UINT64_C(0xffffff0000000000)), /* value=0x1`00000000 */
+ MFX(0xc0000105, "AMD_15H_LWP_CFG", AmdFam15hLwpCfg, AmdFam15hLwpCfg, 0, UINT64_C(0xffff000000000001), 0x7ffffff0), /* value=0x0 */
+ MFX(0xc0000106, "AMD_15H_LWP_CBADDR", AmdFam15hLwpCbAddr, AmdFam15hLwpCbAddr, 0, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0xc0000408, 0xc0000409, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040a, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ MAL(0xc0010000, "AMD_K8_PERF_CTL_0", 0xc0010200),
+ MAL(0xc0010001, "AMD_K8_PERF_CTL_1", 0xc0010202),
+ MAL(0xc0010002, "AMD_K8_PERF_CTL_2", 0xc0010204),
+ MAL(0xc0010003, "AMD_K8_PERF_CTL_3", 0xc0010206),
+ MAL(0xc0010004, "AMD_K8_PERF_CTR_0", 0xc0010201),
+ MAL(0xc0010005, "AMD_K8_PERF_CTR_1", 0xc0010203),
+ MAL(0xc0010006, "AMD_K8_PERF_CTR_2", 0xc0010205),
+ MAL(0xc0010007, "AMD_K8_PERF_CTR_3", 0xc0010207),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x740000, UINT64_C(0xffffffffff82ffff), 0), /* value=0x740000 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0, UINT64_C(0xffffffff01006020), 0), /* value=0x1001031 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x0 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xd0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x4`2f000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x400000`00810008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x0 */
+ MVO(0xc0010028, "AMD_K8_UNK_c001_0028", 0),
+ MVO(0xc0010029, "AMD_K8_UNK_c001_0029", 0),
+ MVO(0xc001002a, "AMD_K8_UNK_c001_002a", 0),
+ MVO(0xc001002b, "AMD_K8_UNK_c001_002b", 0),
+ MVO(0xc001002c, "AMD_K8_UNK_c001_002c", 0),
+ MVO(0xc001002d, "AMD_K8_UNK_c001_002d", 0),
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x664c0005, UINT64_C(0xffffffff90008838), 0), /* value=0x664c0005 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffff9fffffdf), 0), /* value=0x60000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x20),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xfffffffffffffc00), 0), /* value=0x0 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, UINT64_C(0xffffffffff004d01), 0), /* value=0x48080 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xffffffffffff8000), 0), /* value=0x0 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_MAX, 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, ~(uint64_t)UINT32_MAX, 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xffffffffffffe000), 0), /* value=0x0 */
+ MFX(0xc001004a, "AMD_K8_MC_CTL_MASK_6", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x6, UINT64_C(0xffffffffffffffc0), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x20000800 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x2000000 */
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x40, 0, 0), /* value=0x40 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b10000161a), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x800001b1`0000161a */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b100001a17), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x800001b1`00001a17 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000017300003014), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000173`00003014 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000016300003a11), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000163`00003a11 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000014900004c0b), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000149`00004c0b */
+ MFX(0xc0010069, "AMD_10H_P_ST_5", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000013100006205), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000131`00006205 */
+ MFX(0xc001006a, "AMD_10H_P_ST_6", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001200000724c), UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x80000120`0000724c */
+ MFX(0xc001006b, "AMD_10H_P_ST_7", AmdFam10hPStateN, AmdFam10hPStateN, 0, UINT64_C(0x7ffffc00ffbf0000), 0), /* value=0x0 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40011a17, UINT64_C(0xffffffff00b80000), 0), /* value=0x40011a17 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x18000064006724c), UINT64_MAX, 0), /* value=0x1800006`4006724c */
+ MFX(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr, AmdFam10hCStateIoBaseAddr, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x814 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xcdef8800 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0xcdf00000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fff00003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x4, 0, 0), /* value=0x4 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MFX(0xc0010200, "AMD_K8_PERF_CTL_0", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010201, "AMD_K8_PERF_CTR_0", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010202, "AMD_K8_PERF_CTL_1", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x1, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010203, "AMD_K8_PERF_CTR_1", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010204, "AMD_K8_PERF_CTL_2", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x2, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010205, "AMD_K8_PERF_CTR_2", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010206, "AMD_K8_PERF_CTL_3", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x3, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010207, "AMD_K8_PERF_CTR_3", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010208, "AMD_K8_PERF_CTL_4", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x4, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc0010209, "AMD_K8_PERF_CTR_4", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x4, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc001020a, "AMD_K8_PERF_CTL_5", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x5, UINT64_C(0xfffffcf000200000), 0), /* value=0x0 */
+ MFX(0xc001020b, "AMD_K8_PERF_CTR_5", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x5, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010240, "AMD_15H_NB_PERF_CTL_0", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x0, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010241, "AMD_15H_NB_PERF_CTR_0", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010242, "AMD_15H_NB_PERF_CTL_1", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x1, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010243, "AMD_15H_NB_PERF_CTR_1", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x1, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010244, "AMD_15H_NB_PERF_CTL_2", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x2, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010245, "AMD_15H_NB_PERF_CTR_2", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x2, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0010246, "AMD_15H_NB_PERF_CTL_3", AmdFam15hNorthbridgePerfCtlN, AmdFam15hNorthbridgePerfCtlN, 0x3, UINT64_C(0xfffffe00ffa70000), 0), /* value=0x0 */
+ MFX(0xc0010247, "AMD_15H_NB_PERF_CTR_3", AmdFam15hNorthbridgePerfCtrN, AmdFam15hNorthbridgePerfCtrN, 0x3, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0x30000, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x30000 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0011003, "AMD_K8_CPUID_CTL_STD06", AmdK8CpuIdCtlStd06hEcx, AmdK8CpuIdCtlStd06hEcx, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x1 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x1e98220b`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x1c9ffff`2fd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x10 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, UINT64_C(0xffffffff00800000), 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0xfffffcf0`093634f0 */
+ MVI(0xc0011012, "AMD_K7_UNK_c001_1012", UINT32_MAX),
+ MVI(0xc0011013, "AMD_K7_UNK_c001_1013", UINT64_MAX),
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, 0, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x20), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x40000000810008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0x3fffedafbffe2a), 0), /* value=0x0 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, UINT64_C(0xffffff0000000000)), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0x1ffffbfffff13e0), 0), /* value=0x0 */
+ MFX(0xc0011023, "AMD_15H_CU_CFG", AmdFam15hCombUnitCfg, AmdFam15hCombUnitCfg, 0x220, UINT64_C(0x3ff03c760042000), 0), /* value=0x80004000`00000220 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xfffffffffffffe04), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MFX(0xc0011028, "AMD_15H_FP_CFG", AmdFam15hFpuCfg, AmdFam15hFpuCfg, 0, UINT64_C(0xffffe000000000ff), 0), /* value=0x40`e91d0000 */
+ MFX(0xc0011029, "AMD_15H_DC_CFG", AmdFam15hDecoderCfg, AmdFam15hDecoderCfg, 0, UINT64_C(0xffffffffc0188001), 0), /* value=0x488400 */
+ MFX(0xc001102a, "AMD_15H_CU_CFG2", AmdFam15hCombUnitCfg2, AmdFam15hCombUnitCfg2, 0, UINT64_C(0xfffbfb8ff2fc623f), 0), /* value=0x40040`00000cc0 */
+ MFX(0xc001102b, "AMD_15H_CU_CFG3", AmdFam15hCombUnitCfg3, AmdFam15hCombUnitCfg3, 0, UINT64_C(0xffe0027afff00000), 0), /* value=0x33400`00002b93 */
+ MFX(0xc001102c, "AMD_15H_EX_CFG", AmdFam15hExecUnitCfg, AmdFam15hExecUnitCfg, 0x7aac0, UINT64_C(0xffb0c003fbe00024), 0), /* value=0x400`0007aac0 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfeffffffff0000), 0), /* value=0x0 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0x0 */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0x0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xf8000000f8010000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x0 */
+ MFX(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData, AmdFam10hIbsOpData, 0, UINT64_C(0xffffffc000000000), 0), /* value=0x0 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFX(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3, AmdFam10hIbsOpData3, 0, UINT64_C(0xffff0000fff00400), 0), /* value=0x0 */
+ MFN(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr), /* value=0x0 */
+ MFX(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr, AmdFam10hIbsDcPhysAddr, 0, UINT64_C(0xffff000000000000), 0), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+ MFN(0xc001103b, "AMD_14H_IBS_BR_TARGET", AmdFam14hIbsBrTarget, AmdFam14hIbsBrTarget), /* value=0x0 */
+ MVX(0xc0011040, "AMD_15H_UNK_c001_1040", 0, UINT64_C(0xffe0000000000003), 0),
+ MVX(0xc0011041, "AMD_15H_UNK_c001_1041", UINT64_C(0x99dd57b219), 0xa0c820, 0),
+ MVX(0xc0011042, "AMD_15H_UNK_c001_1042", 0, 0, 0),
+ MVX(0xc0011043, "AMD_15H_UNK_c001_1043", UINT64_C(0x300000438), 0, 0),
+ MVX(0xc0011044, "AMD_15H_UNK_c001_1044", UINT64_C(0x300000438), 0, 0),
+ MVX(0xc0011045, "AMD_15H_UNK_c001_1045", UINT64_C(0x300000420), 0, 0),
+ MVX(0xc0011046, "AMD_15H_UNK_c001_1046", UINT64_C(0x300000420), 0, 0),
+ MVX(0xc0011047, "AMD_15H_UNK_c001_1047", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011048, "AMD_15H_UNK_c001_1048", 0xc000001, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011049, "AMD_15H_UNK_c001_1049", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001104a, "AMD_15H_UNK_c001_104a", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc001104b, "AMD_15H_UNK_c001_104b", 0, 0, 0),
+ MVX(0xc001104c, "AMD_15H_UNK_c001_104c", 0, 0, 0),
+ MVX(0xc001104d, "AMD_15H_UNK_c001_104d", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001104e, "AMD_15H_UNK_c001_104e", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc001104f, "AMD_15H_UNK_c001_104f", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011050, "AMD_15H_UNK_c001_1050", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011051, "AMD_15H_UNK_c001_1051", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011052, "AMD_15H_UNK_c001_1052", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011053, "AMD_15H_UNK_c001_1053", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011054, "AMD_15H_UNK_c001_1054", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011055, "AMD_15H_UNK_c001_1055", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011056, "AMD_15H_UNK_c001_1056", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011057, "AMD_15H_UNK_c001_1057", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011058, "AMD_15H_UNK_c001_1058", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc0011059, "AMD_15H_UNK_c001_1059", 0, UINT64_C(0xfffffc0000000000), 0),
+ MVX(0xc001105a, "AMD_15H_UNK_c001_105a", UINT64_C(0x3060c183060c183), UINT64_C(0x8000000000000000), 0),
+ MVX(0xc001105b, "AMD_15H_UNK_c001_105b", UINT64_C(0x318c6318c60c183), UINT64_C(0xe000000000000000), 0),
+ MVX(0xc001105c, "AMD_15H_UNK_c001_105c", 0, UINT64_C(0xff00000000000000), 0),
+ MVX(0xc001105d, "AMD_15H_UNK_c001_105d", 0, UINT64_C(0xff00000000000000), 0),
+ MVX(0xc001105e, "AMD_15H_UNK_c001_105e", 0, UINT64_C(0xfffffffffffffc00), 0),
+ MVX(0xc001105f, "AMD_15H_UNK_c001_105f", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011060, "AMD_15H_UNK_c001_1060", 0, UINT64_C(0xffff000000000000), 0),
+ MVX(0xc0011061, "AMD_15H_UNK_c001_1061", 0, 0, 0),
+ MVX(0xc0011062, "AMD_15H_UNK_c001_1062", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0xc0011063, "AMD_15H_UNK_c001_1063", 0, UINT64_C(0xfffffffffffe4000), 0),
+ MVX(0xc0011064, "AMD_15H_UNK_c001_1064", 0x1, UINT64_C(0xfffffffffffff000), 0),
+ MVX(0xc0011065, "AMD_15H_UNK_c001_1065", 0x1, UINT64_C(0xfffffffff0000000), 0),
+ MVX(0xc0011066, "AMD_15H_UNK_c001_1066", 0, 0, 0),
+ MVX(0xc0011067, "AMD_15H_UNK_c001_1067", 0x1, UINT64_C(0xffffffffffffff80), 0),
+ MVX(0xc0011068, "AMD_15H_UNK_c001_1068", 0, 0, 0),
+ MVX(0xc0011069, "AMD_15H_UNK_c001_1069", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106a, "AMD_15H_UNK_c001_106a", 0x1, 0, 0),
+ MVX(0xc001106b, "AMD_15H_UNK_c001_106b", 0, UINT64_C(0xfffffffffffffff0), 0),
+ MVX(0xc001106c, "AMD_15H_UNK_c001_106c", 0x1, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106d, "AMD_15H_UNK_c001_106d", 0x1, UINT64_C(0xf000000000000080), 0),
+ MVX(0xc001106e, "AMD_15H_UNK_c001_106e", 0x1, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0xc001106f, "AMD_15H_UNK_c001_106f", 0x1, UINT64_C(0xfffffffffffff800), 0),
+ MVI(0xc0011070, "AMD_15H_UNK_c001_1070", UINT64_C(0x20000000000)),
+ MVX(0xc0011071, "AMD_15H_UNK_c001_1071", 0x400000, UINT64_C(0xffffffff01ffffff), 0),
+ MVI(0xc0011072, "AMD_15H_UNK_c001_1072", UINT64_C(0x101592c00000021)),
+ MVI(0xc0011073, "AMD_15H_UNK_c001_1073", UINT64_C(0xec541c0050000000)),
+ MVX(0xc0011080, "AMD_15H_UNK_c001_1080", 0, 0, 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD FX(tm)-8150 Eight-Core Processor.
+ */
+static CPUMDBENTRY const g_Entry_AMD_FX_8150_Eight_Core =
+{
+ /*.pszName = */ "AMD FX-8150 Eight-Core",
+ /*.pszFullName = */ "AMD FX(tm)-8150 Eight-Core Processor",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 21,
+ /*.uModel = */ 1,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_15h_Bulldozer,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_FX_8150_Eight_Core),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_FX_8150_Eight_Core)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_FX_8150_Eight_Core)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_FX_8150_Eight_Core),
+};
+
+#endif /* !VBOX_DB_AMD_FX_8150_Eight_Core */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h b/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h
new file mode 100644
index 00000000..8caceec9
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/AMD_Phenom_II_X6_1100T.h
@@ -0,0 +1,268 @@
+/* $Id: AMD_Phenom_II_X6_1100T.h $ */
+/** @file
+ * CPU database entry "AMD Phenom II X6 1100T".
+ * Generated at 2013-12-17T13:39:08Z by VBoxCpuReport v4.3.53r91360 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
+#define VBOX_CPUDB_AMD_Phenom_II_X6_1100T
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * Raw CPUID leaves for AMD Phenom(tm) II X6 1100T Processor; one entry per leaf, first column is the leaf number (generated data - do not hand-edit values).
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T[] =
+{
+    { 0x00000000, 0x00000000, 0x00000000, 0x00000006, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+    { 0x00000001, 0x00000000, 0x00000000, 0x00100fa0, 0x01060800, 0x00802009, 0x178bfbff, 0 },
+    { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+    { 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0 },
+    { 0x80000000, 0x00000000, 0x00000000, 0x8000001b, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+    { 0x80000001, 0x00000000, 0x00000000, 0x00100fa0, 0x100000a1, 0x000837ff, 0xefd3fbff, 0 },
+    { 0x80000002, 0x00000000, 0x00000000, 0x20444d41, 0x6e656850, 0x74286d6f, 0x4920296d, 0 },
+    { 0x80000003, 0x00000000, 0x00000000, 0x36582049, 0x30313120, 0x50205430, 0x65636f72, 0 },
+    { 0x80000004, 0x00000000, 0x00000000, 0x726f7373, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000005, 0x00000000, 0x00000000, 0xff30ff10, 0xff30ff20, 0x40020140, 0x40020140, 0 },
+    { 0x80000006, 0x00000000, 0x00000000, 0x20800000, 0x42004200, 0x02008140, 0x0030b140, 0 },
+    { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000003f9, 0 },
+    { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00003005, 0x00000000, 0 },
+    { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x0000040f, 0 },
+    { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x80000019, 0x00000000, 0x00000000, 0xf0300000, 0x60100000, 0x00000000, 0x00000000, 0 },
+    { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+    { 0x8000001b, 0x00000000, 0x00000000, 0x0000001f, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for AMD Phenom(tm) II X6 1100T Processor, as captured from real hardware by VBoxCpuReport (generated data - do not hand-edit values).
+ */
+static CPUMMSRRANGE const g_aMsrRanges_AMD_Phenom_II_X6_1100T[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x6db`c482d0b9 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x10000bf),
+ MFX(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf, 0, UINT64_C(0x8644930520000000), 0), /* value=0xa66664d9`32c329b1 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x25`092f34be */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8174c700 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x106, 0, 0), /* value=0x106 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffc0), 0), /* value=0x3f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffefdf`00890004 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffeed0`c7b3ffbc */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0xbdf00000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fff00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xbe000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`fe000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`8174b4f0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8174c860 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7f01`3f916740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8804`3fc00000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0xf2c95840 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0xc0000408, 0xc000040a, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040b, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760600, UINT64_C(0xffffffffff80f8ff), 0), /* value=0x760600 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x1000031, UINT64_C(0xffffffff00006020), 0), /* value=0x1000031 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x3`40200000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xc0000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x4`40000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x584000`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFN(0xc0010021, "AMD_10H_UNK_c001_0021", WriteOnly, IgnoreWrite),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x4a4c0005, UINT64_C(0xffffffffb0008838), 0), /* value=0x4a4c0005 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffffc00088c0), 0), /* value=0x10000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x1dc01430),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xffffffffffffff00), 0), /* value=0x80 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xfffffffffffff000), 0), /* value=0x200 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, UINT64_C(0xffffffffc0000000), 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x14000815 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x2000000 */
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x30, 0, 0), /* value=0x30 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x3, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x3 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x3, 0, 0), /* value=0x3 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019e40001015), 0, 0), /* value=0x8000019e`40001015 */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019f40002411), 0, 0), /* value=0x8000019f`40002411 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000017540002809), 0, 0), /* value=0x80000175`40002809 */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000015540002c01), 0, 0), /* value=0x80000155`40002c01 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000013340003840), 0, 0), /* value=0x80000133`40003840 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40043840, UINT64_C(0xffffffff01b80000), 0), /* value=0x40043840 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x140043840), UINT64_MAX, 0), /* value=0x1`40043840 */
+ MFO(0xc0010073, "AMD_10H_C_ST_IO_BASE_ADDR", AmdFam10hCStateIoBaseAddr), /* value=0x814 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xbdef8000 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0xbdf00000 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0xffff`fff00003 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x4, 0, 0), /* value=0x4 */
+ MFX(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus, 0xe, 0, 0), /* value=0xe */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x4), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x802009`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x837ff`efd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x10 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0, 0, 0), /* value=0x259a5de0`ffffffff */
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x1dc01430), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x58400000000008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0xfffc012000000000), 0), /* value=0x20010`00001000 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xffc0000000000000), 0), /* value=0x9c`49000000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x10200020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MVX(0xc0011028, "AMD_10H_UNK_c001_1028", 0, UINT64_C(0xfffffffffffffff8), 0),
+ MVX(0xc0011029, "AMD_10H_UNK_c001_1029", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2, 0, UINT64_C(0xfff00000c0000000), 0), /* value=0x40050`01000040 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfcffff00000000), 0), /* value=0x140003`00000000 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0xffffffff`a08cf13e */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0x4`24ce313e */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xfffffffffff00000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0x4d231923 */
+ MFI(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData), /* value=0x12`7fc7bc0e */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFI(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3), /* value=0x0 */
+ MFX(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr, 0, UINT64_C(0x7fffffffffff), 0), /* value=0x0 */
+ MFI(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x101 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for AMD Phenom(tm) II X6 1100T Processor (family 10h, K10).
+ */
+static CPUMDBENTRY const g_Entry_AMD_Phenom_II_X6_1100T =
+{
+    /*.pszName = */ "AMD Phenom II X6 1100T",
+    /*.pszFullName = */ "AMD Phenom(tm) II X6 1100T Processor",
+    /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+    /*.uFamily = */ 16, /* 0x10, matches .enmMicroarch below */
+    /*.uModel = */ 10,
+    /*.uStepping = */ 0,
+    /*.enmMicroarch = */ kCpumMicroarch_AMD_K10,
+    /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+    /*.fFlags = */ 0,
+    /*.cMaxPhysAddrWidth= */ 48,
+    /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T),
+    /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_AMD_Phenom_II_X6_1100T)),
+    /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS, /* sic - spelling matches the upstream enum name */
+    /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+    /*.fMsrMask = */ UINT32_MAX,
+    /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_AMD_Phenom_II_X6_1100T)),
+    /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_AMD_Phenom_II_X6_1100T),
+};
+
+#endif /* !VBOX_CPUDB_AMD_Phenom_II_X6_1100T */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h
new file mode 100644
index 00000000..7e4c6f56
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i5_3570.h
@@ -0,0 +1,335 @@
+/* $Id: Intel_Core_i5_3570.h $ */
+/** @file
+ * CPU database entry "Intel Core i5-3570".
+ * Generated at 2013-12-13T16:13:56Z by VBoxCpuReport v4.3.53r91216 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i5_3570
+#define VBOX_CPUDB_Intel_Core_i5_3570
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i5_3570[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000306a9, 0x04100800, 0x7fbae3ff, 0xbfebfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b0ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00001120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000281, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300803, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000001, 0x00000001, 0x00000100, 0x00000004, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, 0x00000000, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x65746e49, 0x2952286c, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x726f4320, 0x4d542865, 0x35692029, 0x3735332d, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x50432030, 0x20402055, 0x30342e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i5_3570[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x4293`b0a3f54a */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x4000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00c00), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x285),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40004*/
+ MVO(0x00000036, "I7_UNK_0000_0036", UINT64_C(0x1000000000105df2)),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1900000000), 0x1, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c8, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "MSR_PLATFORM_INFO", IntelPlatformInfo), /* value=0x81010'e0012200*/
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x1e008403 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x10414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x3a`2c710584 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x39`f97c8410 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MVX(0x00000102, "I7_IB_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IB_UNK_0000_0103", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVX(0x00000104, "I7_IB_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl, IntelI7SandyAesNiCtl, 0, 0, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MVX(0x00000140, "I7_IB_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000142, "I7_IB_UNK_0000_0142", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffffff`8159cbe0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x0000018d, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0x190000, 0x1e00ff, UINT64_C(0xffffffffffe00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x1d2400001000), 0, 0), /* value=0x1d24`00001000 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x1000, 0, 0), /* Might bite. value=0x1000 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x1000013, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x1000013 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x884c0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x884c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x691400, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x691400 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400000 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, ReadOnly, 0x24252626, 0, 0), /* value=0x24252626 */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88490000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0x1000003, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x8 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffffff`a061f4c9 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffffff`810473c0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0, UINT64_C(0x7fff800000000000)), /* value=0x0 */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0xdb000006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff800800 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0x20, UINT64_C(0xffffffffffc20000)), /* value=0x14005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xc`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`00000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xe0000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xdc000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xdb800000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1f000000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1e800000 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x4`1e600000 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x40000001 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000288, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "I7_IB_UNK_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ MFX(0x00000309, "IA32_FIXED_CTR0", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)), /* value=0x46 */
+ MFX(0x0000030a, "IA32_FIXED_CTR1", Ia32FixedCtrN, Ia32FixedCtrN, 0x1, 0x816506, UINT64_C(0xffff000000000000)), /* value=0xffff`d65aa6fb */
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, 0, UINT64_C(0xffff000000000000)), /* value=0x264 */
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0xb0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8ffffff00)), /* value=0x7`000000ff */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe0000007000000ff), UINT64_C(0x1ffffff8ffffff00)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, 0, UINT64_C(0xffffffff1fffffe0)), /* value=0x2000000f */
+ MFX(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus, IntelI7UncPerfGlobalStatus, 0, 0xf, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x1950 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x5 */
+ MVX(0x00000397, "I7_IB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffc0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffc0230000)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x7`7827f19a */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x1`3e604592 */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBase, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x1767ff, 0, 0), /* value=0x1767ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x8ff00000000), 0, 0), /* value=0x8ff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106114141), 0, 0), /* value=0xf01`06114141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c8, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffff8804`07da1cc0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x18141494`80000380 */
+ MVX(0x00000602, "I7_IB_UNK_0000_0602", UINT64_C(0x1814149480000170), UINT32_C(0x80001fff), 0x7fffe000),
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x802c2c2c */
+ MVX(0x00000604, "I7_IB_UNK_0000_0602", UINT32_C(0x80686868), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x883b */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x76c`bd67b914 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x80008302`00148268 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x3451b969 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0xd0000`01e00268 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x357de52e */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x6eeef */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ MFO(0x00000648, "I7_IB_MSR_CONFIG_TDP_NOMINAL", IntelI7IvyConfigTdpNominal), /* value=0x22 */
+ MFO(0x00000649, "I7_IB_MSR_CONFIG_TDP_LEVEL1", IntelI7IvyConfigTdpLevel1), /* value=0x1e00000`00000000 */
+ MFO(0x0000064a, "I7_IB_MSR_CONFIG_TDP_LEVEL2", IntelI7IvyConfigTdpLevel2), /* value=0x1e00000`00000000 */
+ MFO(0x0000064b, "I7_IB_MSR_CONFIG_TDP_CONTROL", IntelI7IvyConfigTdpControl), /* value=0x80000000 */
+ MFX(0x0000064c, "I7_IB_MSR_TURBO_ACTIVATION_RATIO", IntelI7IvyTurboActivationRatio, IntelI7IvyTurboActivationRatio, 0, 0, UINT64_C(0xffffffff7fffff00)), /* value=0x80000000 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MFX(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline, Ia32TscDeadline, 0, UINT64_C(0xb280452208b), 0), /* value=0x4293`ef1535a6 */
+ MVX(0x00000700, "I7_IB_UNK_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "I7_IB_UNK_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "I7_IB_UNK_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "I7_IB_UNK_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "I7_IB_UNK_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "I7_IB_UNK_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "I7_IB_UNK_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "I7_IB_UNK_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "I7_IB_UNK_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "I7_IB_UNK_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "I7_IB_UNK_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "I7_IB_UNK_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "I7_IB_UNK_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "I7_IB_UNK_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "I7_IB_UNK_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "I7_IB_UNK_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "I7_IB_UNK_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "I7_IB_UNK_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "I7_IB_UNK_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "I7_IB_UNK_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "I7_IB_UNK_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "I7_IB_UNK_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "I7_IB_UNK_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "I7_IB_UNK_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "I7_IB_UNK_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "I7_IB_UNK_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "I7_IB_UNK_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "I7_IB_UNK_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "I7_IB_UNK_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "I7_IB_UNK_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "I7_IB_UNK_0000_0730", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "I7_IB_UNK_0000_0731", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "I7_IB_UNK_0000_0732", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "I7_IB_UNK_0000_0733", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "I7_IB_UNK_0000_0734", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "I7_IB_UNK_0000_0735", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "I7_IB_UNK_0000_0736", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "I7_IB_UNK_0000_0737", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "I7_IB_UNK_0000_0738", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "I7_IB_UNK_0000_0739", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "I7_IB_UNK_0000_0740", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "I7_IB_UNK_0000_0741", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "I7_IB_UNK_0000_0742", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "I7_IB_UNK_0000_0743", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "I7_IB_UNK_0000_0744", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "I7_IB_UNK_0000_0745", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "I7_IB_UNK_0000_0746", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "I7_IB_UNK_0000_0747", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "I7_IB_UNK_0000_0748", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "I7_IB_UNK_0000_0749", 0, 0, UINT64_C(0xfffff00000000000)),
+ RFN(0x00000800, 0x000008ff, "IA32_X2APIC_n", Ia32X2ApicN, Ia32X2ApicN),
+ MFN(0x00000c80, "IA32_DEBUG_INTERFACE", Ia32DebugInterface, Ia32DebugInterface), /* value=0x0 */
+ MVX(0x00000c81, "I7_IB_UNK_0000_0c81", 0, 0, 0),
+ MVX(0x00000c82, "I7_IB_UNK_0000_0c82", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000c83, "I7_IB_UNK_0000_0c83", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`8159b620 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8159ce10 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x43700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x908880 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8804`1e200000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i5_3570 =
+{
+ /*.pszName = */ "Intel Core i5-3570",
+ /*.pszFullName = */ "Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 58,
+ /*.uStepping = */ 9,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i5_3570),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i5_3570)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i5_3570)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i5_3570),
+};
+
+#endif /* !VBOX_DB_Intel_Core_i5_3570 */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h
new file mode 100644
index 00000000..72ab084f
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_2635QM.h
@@ -0,0 +1,326 @@
+/* $Id: Intel_Core_i7_2635QM.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-2635QM".
+ * Generated at 2014-02-28T18:53:09Z by VBoxCpuReport v4.3.53r92586 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_2635QM
+#define VBOX_CPUDB_Intel_Core_i7_2635QM
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_2635QM[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000206a7, 0x04100800, 0x1fbae3bf, 0xbfebfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x1c03c163, 0x02c0003f, 0x00001fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x6e492020, 0x286c6574, 0x43202952, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x2865726f, 0x20294d54, 0x322d3769, 0x51353336, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x5043204d, 0x20402055, 0x30302e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_2635QM[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x94d`1967512c */
+ MFX(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x10000000000000), 0, 0), /* value=0x100000`00000000 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x5),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40008 */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MFX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_C(0xfffffffe)), /* value=0x28`00000000 */
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "IA32_PLATFORM_INFO", IntelPlatformInfo), /* value=0x800`60011400 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x405 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x6a`9190b14b */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x69`df4de05c */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl, IntelI7SandyAesNiCtl, 0, 0, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`22904080 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`222f3030 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0, 0xe0000, UINT64_C(0xfffffffffff00000)), /* value=0x0 */
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x1d4d00000e00), 0, 0), /* value=0x1d4d`00000e00 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x1d00, 0, 0), /* Might bite. value=0x1d00 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x883d0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x883d0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x640e00, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x640e00 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400001 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, ReadOnly, 0x1a1a1c1d, 0, 0), /* value=0x1a1a1c1d */
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x4, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x883a0000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0xc */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffff7f`a4a6e188 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`222d5ad0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0, UINT64_C(0x7fff800000000000)), /* value=0x0 */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MVO(0x000001e1, "I7_SB_UNK_0000_01e1", 0x2),
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x0 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0x0 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0x20, UINT64_C(0xfffffffffff20000)), /* value=0x4005f */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xa0000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x90000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8b800000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000288, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "I7_IB_UNK_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "I7_IB_UNK_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)), /* value=0x0 */
+ MFX(0x00000391, "I7_UNC_PERF_GLOBAL_CTRL", IntelI7UncPerfGlobalCtrl, IntelI7UncPerfGlobalCtrl, 0, 0, UINT64_C(0xffffffff1fffffe0)), /* value=0x0 */
+ MFX(0x00000392, "I7_UNC_PERF_GLOBAL_STATUS", IntelI7UncPerfGlobalStatus, IntelI7UncPerfGlobalStatus, 0, 0xf, UINT64_C(0xfffffffffffffff0)), /* value=0x0 */
+ MFX(0x00000393, "I7_UNC_PERF_GLOBAL_OVF_CTRL", IntelI7UncPerfGlobalOvfCtrl, IntelI7UncPerfGlobalOvfCtrl, 0, 0x3, UINT64_C(0xfffffffffffffffc)), /* value=0x0 */
+ MFX(0x00000394, "I7_UNC_PERF_FIXED_CTR_CTRL", IntelI7UncPerfFixedCtrCtrl, IntelI7UncPerfFixedCtrCtrl, 0, 0, UINT64_C(0xffffffffffafffff)), /* value=0x0 */
+ MFX(0x00000395, "I7_UNC_PERF_FIXED_CTR", IntelI7UncPerfFixedCtr, IntelI7UncPerfFixedCtr, 0, 0, UINT64_C(0xffff000000000000)), /* value=0x0 */
+ MFO(0x00000396, "I7_UNC_CBO_CONFIG", IntelI7UncCBoxConfig), /* value=0x5 */
+ MVX(0x00000397, "I7_SB_UNK_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x000003b0, "I7_UNC_ARB_PERF_CTR0", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b1, "I7_UNC_ARB_PERF_CTR1", IntelI7UncArbPerfCtrN, IntelI7UncArbPerfCtrN, 0, 0, UINT64_C(0xfffff00000000000)), /* value=0x0 */
+ MFX(0x000003b2, "I7_UNC_ARB_PERF_EVT_SEL0", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003b3, "I7_UNC_ARB_PERF_EVT_SEL1", IntelI7UncArbPerfEvtSelN, IntelI7UncArbPerfEvtSelN, 0, 0, UINT64_C(0xffffffffe0230000)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x278ad50 */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBase, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x627ff, 0, 0), /* value=0x627ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0xff00000000), 0, 0), /* value=0xff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106114141), 0, 0), /* value=0xf01`06114141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000502, "I7_SB_UNK_0000_0502", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x18141494`8000030c */
+ MVX(0x00000602, "I7_IB_UNK_0000_0602", UINT64_C(0x1814149480000104), UINT32_C(0x80001fff), 0x7fffe000),
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80303030 */
+ MVX(0x00000604, "I7_IB_UNK_0000_0602", UINT32_C(0x80646464), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MVX(0x00000609, "I7_SB_UNK_0000_0609", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x8c02 */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x11`06f311d4 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x800001c2`00dc8168 */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0x55a9ec99 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x100240`01200168 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x1dcdc9a0 */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x00000640, "I7_HW_MSR_PP0_POWER_LIMIT", IntelI7RaplPp1PowerLimit, IntelI7RaplPp1PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000641, "I7_HW_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp1EnergyStatus), /* value=0x39748b6 */
+ MFX(0x00000642, "I7_HW_MSR_PP0_POLICY", IntelI7RaplPp1Policy, IntelI7RaplPp1Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x10 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFX(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline, Ia32TscDeadline, 0, UINT64_C(0x1000000018), 0), /* value=0x94d`402e841f */
+ MVX(0x00000700, "MSR_UNC_CBO_0_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "MSR_UNC_CBO_0_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "MSR_UNC_CBO_0_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "MSR_UNC_CBO_0_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "MSR_UNC_CBO_0_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "MSR_UNC_CBO_0_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "MSR_UNC_CBO_0_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "MSR_UNC_CBO_0_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "MSR_UNC_CBO_0_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "MSR_UNC_CBO_0_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "MSR_UNC_CBO_1_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "MSR_UNC_CBO_1_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "MSR_UNC_CBO_1_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "MSR_UNC_CBO_1_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "MSR_UNC_CBO_1_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "MSR_UNC_CBO_1_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "MSR_UNC_CBO_1_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "MSR_UNC_CBO_1_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "MSR_UNC_CBO_1_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "MSR_UNC_CBO_1_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "MSR_UNC_CBO_2_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "MSR_UNC_CBO_2_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "MSR_UNC_CBO_2_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "MSR_UNC_CBO_2_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "MSR_UNC_CBO_2_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "MSR_UNC_CBO_2_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "MSR_UNC_CBO_2_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "MSR_UNC_CBO_2_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "MSR_UNC_CBO_2_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "MSR_UNC_CBO_2_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "MSR_UNC_CBO_3_PERFEVTSEL0", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "MSR_UNC_CBO_3_PERFEVTSEL1", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "MSR_UNC_CBO_3_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "MSR_UNC_CBO_3_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "MSR_UNC_CBO_3_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "MSR_UNC_CBO_3_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "MSR_UNC_CBO_3_PER_CTR0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "MSR_UNC_CBO_3_PER_CTR1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "MSR_UNC_CBO_3_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "MSR_UNC_CBO_3_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "MSR_UNC_CBO_4_PERFEVTSEL0?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "MSR_UNC_CBO_4_PERFEVTSEL1?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "MSR_UNC_CBO_4_PERFEVTSEL2?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "MSR_UNC_CBO_4_PERFEVTSEL3?", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "MSR_UNC_CBO_4_UNK_4", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "MSR_UNC_CBO_4_UNK_5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "MSR_UNC_CBO_4_PER_CTR0?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "MSR_UNC_CBO_4_PER_CTR1?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "MSR_UNC_CBO_4_PER_CTR2?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "MSR_UNC_CBO_4_PER_CTR3?", 0, 0, UINT64_C(0xfffff00000000000)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`222f2fd0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff81`0500f000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7b14d3f0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_2635QM =
+{
+ /*.pszName = */ "Intel Core i7-2635QM",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-2635QM CPU @ 2.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 42,
+ /*.uStepping = */ 7,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_2635QM),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_2635QM)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_2635QM)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_2635QM),
+};
+
+#endif /* !VBOX_DB_Intel_Core_i7_2635QM */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h
new file mode 100644
index 00000000..0e7905e0
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3820QM.h
@@ -0,0 +1,383 @@
+/* $Id: Intel_Core_i7_3820QM.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-3820QM".
+ * Generated at 2013-12-04T12:54:32Z by VBoxCpuReport v4.3.51r91071 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_3820QM
+#define VBOX_CPUDB_Intel_Core_i7_3820QM
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_3820QM[] =
+{
+ /* Columns: leaf, sub-leaf, sub-leaf mask, EAX, EBX, ECX, EDX, flags
+    -- presumed CPUMCPUIDLEAF layout, confirm against VBox/vmm/cpum.h. */
+ /* Leaf 0: max std leaf 0xd; EBX/EDX/ECX bytes spell "GenuineIntel". */
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ /* Leaf 1: EAX 0x000306a9 => family 6, model 0x3a (58), stepping 9,
+    matching the uFamily/uModel/uStepping fields of the DB entry below. */
+ { 0x00000001, 0x00000000, 0x00000000, 0x000306a9, 0x02100800, 0x7fbae3ff, 0xbfebfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000009, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000281, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 },
+ { 0x0000000b, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000100, 0x00000002, 0 },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, 0x00000000, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 },
+ /* Extended leaves 0x80000002..4 carry the ASCII brand string. */
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x28100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x6e492020, 0x286c6574, 0x43202952, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x2865726f, 0x20294d54, 0x332d3769, 0x51303238, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x5043204d, 0x20402055, 0x30372e32, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_3820QM[] =
+{
+ /* Generated dump -- do not reorder or hand-edit values. Entry macros
+    (MFX/MFN/MVX/MVO/MFS/RSN/RFN/...) take the MSR index, a name, and then
+    handler names and/or initial-value, ignore-mask and GP-mask arguments;
+    exact macro layout is defined in CPUMR3Db.cpp (not visible here) --
+    confirm there before relying on argument positions. Trailing comments
+    record the value observed on the reference machine. */
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFX(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter, 0, 0, 0),
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x10000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00900), 0, UINT64_C(0xfffffff0000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0xe),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x40008*/
+ MVO(0x00000036, "I7_UNK_0000_0036", 0x6c405eec),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0xff07 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1500000000), 0x1, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000c1, "IA32_PMC0", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c2, "IA32_PMC1", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c3, "IA32_PMC2", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c4, "IA32_PMC3", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x80c10f0011b00)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1fffbf8)), /* value=0x8405 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFX(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf, 0, 0x47810, 0), /* value=0x6b`5d075e9c */
+ MFX(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf, 0, 0x1121880, 0), /* value=0x55`2bec768b */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MVX(0x00000102, "I7_IVY_UNK_0000_0102", 0, 0, UINT64_C(0xffffffff7fff8000)),
+ MVX(0x00000103, "I7_IVY_UNK_0000_0103", 0, 0, UINT64_C(0xffffffffffffff00)),
+ MVX(0x00000104, "I7_IVY_UNK_0000_0104", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000132, "I7_UNK_0000_0132", UINT64_MAX, 0, 0),
+ MVX(0x00000133, "I7_UNK_0000_0133", UINT64_MAX, 0, 0),
+ MVX(0x00000134, "I7_UNK_0000_0134", UINT64_MAX, 0, 0),
+ MVO(0x0000013c, "TODO_0000_013c", 0x1),
+ MVX(0x00000140, "I7_IVY_UNK_0000_0140", 0, 0, UINT64_C(0xfffffffffffffffe)),
+ MVX(0x00000142, "I7_IVY_UNK_0000_0142", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`21af5080 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`214ce720 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc09, 0, 0), /* value=0xc09 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_C(0xffffffff00080000)),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0x180000, 0x1e00ff, UINT64_C(0xffffffffffe00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x240700002400), 0, 0), /* value=0x2407`00002400 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2500, 0, 0), /* Might bite. value=0x2500 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x10, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x10 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88340000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x88340000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x691200, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x691200 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0, 0, UINT64_C(0xffffffc000007000)), /* XXX: The range ended earlier than expected! */
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400001 */
+ MVX(0x000001ad, "TODO_0000_01ad", 0x23232425, UINT32_MAX, ~(uint64_t)UINT32_MAX),
+ MVX(0x000001b0, "IA32_ENERGY_PERF_BIAS", 0x4, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88300000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "TODO_0000_01c6", 0x3),
+ MVX(0x000001c8, "TODO_0000_01c8", 0, 0, UINT64_C(0xfffffffffffffe00)),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0x8 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7fffff7f`a38c2298 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`214b24e0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0, UINT64_C(0x7fff800000000000)), /* value=0x0 */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MVO(0x000001f0, "TODO_0000_01f0", 0x74),
+ MVO(0x000001f2, "TODO_0000_01f2", UINT32_C(0x8b000006)),
+ MVO(0x000001f3, "TODO_0000_01f3", UINT32_C(0xff800800)),
+ MVX(0x000001fc, "TODO_0000_01fc", 0x340047, 0x20, UINT64_C(0xffffffffffc20000)),
+ /* Variable-range MTRR base/mask pairs 0..9. */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xc0000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xa0000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x90000000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8c000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`fc000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x8b000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ /* Fixed-range MTRRs, backed by guest MSR state. */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MVX(0x00000280, "TODO_0000_0280", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000281, "TODO_0000_0281", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000282, "TODO_0000_0282", 0, 0x40007fff, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000283, "TODO_0000_0283", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000284, "TODO_0000_0284", 0, 0x40007fff, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000285, "TODO_0000_0285", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000286, "TODO_0000_0286", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000287, "TODO_0000_0287", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x00000288, "TODO_0000_0288", 0, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "TODO_0000_02e0", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFN(0x000002e6, "TODO_0000_02e6", WriteOnly, IgnoreWrite),
+ MVX(0x000002e7, "TODO_0000_02e7", 0x1, 0x1, UINT64_C(0xfffffffffffffffe)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000305, "TODO_0000_0305", 0),
+ MVX(0x00000309, "TODO_0000_0309", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x0000030a, "TODO_0000_030a", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x0000030b, "TODO_0000_030b", 0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000345, "TODO_0000_0345", 0x31c3),
+ MVX(0x0000038d, "TODO_0000_038d", 0, 0, UINT64_C(0xfffffffffffff000)),
+ MVO(0x0000038e, "TODO_0000_038e", UINT64_C(0x8000000000000000)),
+ MVX(0x0000038f, "TODO_0000_038f", 0xf, 0, UINT64_C(0xfffffff8fffffff0)),
+ MVX(0x00000390, "TODO_0000_0390", 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)),
+ MVX(0x00000391, "TODO_0000_0391", 0, 0, UINT64_C(0xffffffff1fffffe0)),
+ MVX(0x00000392, "TODO_0000_0392", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000393, "TODO_0000_0393", 0, 0x3, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x00000394, "TODO_0000_0394", 0, 0, UINT64_C(0xffffffffffafffff)),
+ MVX(0x00000395, "TODO_0000_0395", 0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000396, "TODO_0000_0396", 0x5),
+ MVX(0x00000397, "TODO_0000_0397", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x000003b0, "TODO_0000_03b0", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x000003b1, "TODO_0000_03b1", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x000003b2, "TODO_0000_03b2", 0, 0, UINT64_C(0xffffffffc0230000)),
+ MVX(0x000003b3, "TODO_0000_03b3", 0, 0, UINT64_C(0xffffffffc0230000)),
+ MVX(0x000003f1, "TODO_0000_03f1", 0, 0, UINT64_C(0x7ffffff0fffffff0)),
+ MVX(0x000003f6, "TODO_0000_03f6", UINT16_MAX, UINT64_C(0xffffffffffff0000), 0),
+ MVO(0x000003f8, "TODO_0000_03f8", 0),
+ MVO(0x000003f9, "TODO_0000_03f9", UINT64_C(0x27495a818)),
+ MVO(0x000003fa, "TODO_0000_03fa", UINT64_C(0x428fa6c6207)),
+ MVO(0x000003fc, "TODO_0000_03fc", 0x389bb693),
+ MVO(0x000003fd, "TODO_0000_03fd", 0x13323393),
+ MVO(0x000003fe, "TODO_0000_03fe", UINT64_C(0x48d7ffc9bd1)),
+ RFN(0x00000400, 0x00000423, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MVO(0x00000480, "TODO_0000_0480", UINT64_C(0xda040000000010)),
+ MVO(0x00000481, "TODO_0000_0481", UINT64_C(0x7f00000016)),
+ MVO(0x00000482, "TODO_0000_0482", UINT64_C(0xfff9fffe0401e172)),
+ MVO(0x00000483, "TODO_0000_0483", UINT64_C(0x7fffff00036dff)),
+ MVO(0x00000484, "TODO_0000_0484", UINT64_C(0xffff000011ff)),
+ MVO(0x00000485, "TODO_0000_0485", 0x100401e5),
+ MVO(0x00000486, "TODO_0000_0486", UINT32_C(0x80000021)),
+ MVO(0x00000487, "TODO_0000_0487", UINT32_MAX),
+ MVO(0x00000488, "TODO_0000_0488", 0x2000),
+ MVO(0x00000489, "TODO_0000_0489", 0x1767ff),
+ MVO(0x0000048a, "TODO_0000_048a", 0x2a),
+ MVO(0x0000048b, "TODO_0000_048b", UINT64_C(0x8ff00000000)),
+ MVO(0x0000048c, "TODO_0000_048c", UINT64_C(0xf0106114141)),
+ MVO(0x0000048d, "TODO_0000_048d", UINT64_C(0x7f00000016)),
+ MVO(0x0000048e, "TODO_0000_048e", UINT64_C(0xfff9fffe04006172)),
+ MVO(0x0000048f, "TODO_0000_048f", UINT64_C(0x7fffff00036dfb)),
+ MVO(0x00000490, "TODO_0000_0490", UINT64_C(0xffff000011fb)),
+ MVX(0x000004c1, "TODO_0000_04c1", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c2, "TODO_0000_04c2", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c3, "TODO_0000_04c3", 0, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x000004c4, "TODO_0000_04c4", 0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MVX(0x00000601, "TODO_0000_0601", UINT64_C(0x1814149480000380), UINT32_C(0x80001fff), 0x7fffe000),
+ MVX(0x00000602, "TODO_0000_0602", UINT64_C(0x1814149480000170), UINT32_C(0x80001fff), 0x7fffe000),
+ MVX(0x00000603, "TODO_0000_0603", UINT32_C(0x80303030), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVX(0x00000604, "TODO_0000_0604", UINT32_C(0x80646464), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000606, "TODO_0000_0606", 0xa1003),
+ MVX(0x0000060a, "TODO_0000_060a", 0x8894, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x0000060b, "TODO_0000_060b", 0x88a9, 0, UINT64_C(0xffffffffffff6000)),
+ MVX(0x0000060c, "TODO_0000_060c", 0x88c6, 0, UINT64_C(0xffffffffffff6000)),
+ MVO(0x0000060d, "TODO_0000_060d", UINT64_C(0xd0fd23dd9)),
+ MVX(0x00000610, "TODO_0000_0610", UINT64_C(0x800083e800dd8320), UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)),
+ MVO(0x00000611, "TODO_0000_0611", 0x2ed06e3b),
+ MVO(0x00000614, "TODO_0000_0614", 0x1200168),
+ MVX(0x00000638, "TODO_0000_0638", UINT32_C(0x80000000), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000639, "TODO_0000_0639", 0x106344fd),
+ MVX(0x0000063a, "TODO_0000_063a", 0, 0, UINT64_C(0xffffffffffffffe0)),
+ MVX(0x00000640, "TODO_0000_0640", UINT32_C(0x80000000), UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)),
+ MVO(0x00000641, "TODO_0000_0641", 0xb39e93),
+ MVX(0x00000642, "TODO_0000_0642", 0x10, 0, UINT64_C(0xffffffffffffffe0)),
+ MVO(0x00000648, "TODO_0000_0648", 0x1b),
+ MVO(0x00000649, "TODO_0000_0649", UINT64_C(0x120000000000000)),
+ MVO(0x0000064a, "TODO_0000_064a", UINT64_C(0x120000000000000)),
+ MVO(0x0000064b, "TODO_0000_064b", UINT32_C(0x80000000)),
+ MVX(0x0000064c, "TODO_0000_064c", UINT32_C(0x80000000), UINT32_C(0x800000ff), UINT64_C(0xffffffff7fffff00)),
+ MVX(0x00000680, "TODO_0000_0680", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000681, "TODO_0000_0681", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000682, "TODO_0000_0682", UINT64_C(0x7fffff7fa38c2289), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000683, "TODO_0000_0683", UINT64_C(0x7fffff80214b24cb), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000684, "TODO_0000_0684", UINT64_C(0x7fffff7fa38c2298), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000685, "TODO_0000_0685", UINT64_C(0x7fffff80214b24ee), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000686, "TODO_0000_0686", UINT64_C(0x7fffff7fa38c2289), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000687, "TODO_0000_0687", UINT64_C(0x7fffff80214b24cb), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000688, "TODO_0000_0688", UINT64_C(0x7fffff7fa38c2298), 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x00000689, "TODO_0000_0689", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068a, "TODO_0000_068a", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068b, "TODO_0000_068b", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068c, "TODO_0000_068c", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068d, "TODO_0000_068d", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068e, "TODO_0000_068e", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x0000068f, "TODO_0000_068f", 0, 0, UINT64_C(0x7fff800000000000)),
+ MVX(0x000006c0, "TODO_0000_06c0", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c1, "TODO_0000_06c1", UINT64_C(0xffffff7fa38c227f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c2, "TODO_0000_06c2", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c3, "TODO_0000_06c3", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c4, "TODO_0000_06c4", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c5, "TODO_0000_06c5", UINT64_C(0xffffff7fa38c227f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c6, "TODO_0000_06c6", UINT64_C(0xffffff80214b24c0), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c7, "TODO_0000_06c7", UINT64_C(0xffffff7fa38c228f), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c8, "TODO_0000_06c8", UINT64_C(0xffffff80214b24e0), 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006c9, "TODO_0000_06c9", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006ca, "TODO_0000_06ca", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cb, "TODO_0000_06cb", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cc, "TODO_0000_06cc", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cd, "TODO_0000_06cd", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006ce, "TODO_0000_06ce", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006cf, "TODO_0000_06cf", 0, 0, UINT64_C(0xffff800000000000)),
+ MVX(0x000006e0, "TODO_0000_06e0", UINT64_C(0x535157ca1ca), 0x80000, 0),
+ MVX(0x00000700, "TODO_0000_0700", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000701, "TODO_0000_0701", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000702, "TODO_0000_0702", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000703, "TODO_0000_0703", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000704, "TODO_0000_0704", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000705, "TODO_0000_0705", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000706, "TODO_0000_0706", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000707, "TODO_0000_0707", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000708, "TODO_0000_0708", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000709, "TODO_0000_0709", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000710, "TODO_0000_0710", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000711, "TODO_0000_0711", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000712, "TODO_0000_0712", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000713, "TODO_0000_0713", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000714, "TODO_0000_0714", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000715, "TODO_0000_0715", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000716, "TODO_0000_0716", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000717, "TODO_0000_0717", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000718, "TODO_0000_0718", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000719, "TODO_0000_0719", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000720, "TODO_0000_0720", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000721, "TODO_0000_0721", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000722, "TODO_0000_0722", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000723, "TODO_0000_0723", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000724, "TODO_0000_0724", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000725, "TODO_0000_0725", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000726, "TODO_0000_0726", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000727, "TODO_0000_0727", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000728, "TODO_0000_0728", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000729, "TODO_0000_0729", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000730, "TODO_0000_0730", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000731, "TODO_0000_0731", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000732, "TODO_0000_0732", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000733, "TODO_0000_0733", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000734, "TODO_0000_0734", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000735, "TODO_0000_0735", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000736, "TODO_0000_0736", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000737, "TODO_0000_0737", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000738, "TODO_0000_0738", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000739, "TODO_0000_0739", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000740, "TODO_0000_0740", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000741, "TODO_0000_0741", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000742, "TODO_0000_0742", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000743, "TODO_0000_0743", 0, 0, UINT64_C(0xffffffffe0230000)),
+ MVX(0x00000744, "TODO_0000_0744", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000745, "TODO_0000_0745", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000746, "TODO_0000_0746", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000747, "TODO_0000_0747", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000748, "TODO_0000_0748", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000749, "TODO_0000_0749", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c80, "TODO_0000_0c80", 0, 0, 0),
+ MVX(0x00000c81, "TODO_0000_0c81", 0, 0, 0),
+ MVX(0x00000c82, "TODO_0000_0c82", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000c83, "TODO_0000_0c83", 0, ~(uint64_t)UINT32_MAX, 0),
+ /* AMD64 long-mode syscall and segment-base MSRs (0xc0000080..). */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFX(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget, 0, 0, 0), /* value=0x1b0008`00000000 */
+ MFX(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`214ce6c0 */
+ MFX(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFX(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff81`e942f000 */
+ MFX(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x7fff`7ccad1e0 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_3820QM =
+{
+ /*.pszName = */ "Intel Core i7-3820QM",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-3820QM CPU @ 2.70GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /* Family/model/stepping 6/58/9 decoded from CPUID(1).EAX 0x000306a9 above. */
+ /*.uFamily = */ 6,
+ /*.uModel = */ 58,
+ /*.uStepping = */ 9,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_3820QM),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_3820QM)),
+ /* Unknown leaves answer like the last standard leaf (0xd values below). */
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /* Fixed: use the cMsrRanges/paMsrRanges pair like the other CPUMDBENTRY
+    initializers in this import (see g_Entry_Intel_Core_i7_2635QM); the
+    previous .apaMsrRanges[] aggregate does not match this struct layout. */
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_3820QM)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_3820QM),
+};
+
+#endif /* !VBOX_CPUDB_Intel_Core_i7_3820QM */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h
new file mode 100644
index 00000000..6f746860
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Core_i7_3960X.h
@@ -0,0 +1,365 @@
+/* $Id: Intel_Core_i7_3960X.h $ */
+/** @file
+ * CPU database entry "Intel Core i7-3960X".
+ * Generated at 2013-12-12T15:29:11Z by VBoxCpuReport v4.3.53r91237 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Core_i7_3960X
+#define VBOX_CPUDB_Intel_Core_i7_3960X
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Core_i7_3960X[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69, 0 }, /* EAX=max std leaf 0xd; EBX/EDX/ECX spell "GenuineIntel" */
+ { 0x00000001, 0x00000000, 0x00000000, 0x000206d6, 0x02200800, 0x1fbee3bf, 0xbfebfbff, 0 }, /* EAX=0x206d6: family 6, model 0x2d, stepping 6 (matches DB entry below) */
+ { 0x00000002, 0x00000000, 0x00000000, 0x76035a01, 0x00f0b2ff, 0x00000000, 0x00ca0000, 0 }, /* cache/TLB descriptor bytes */
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x3c004121, 0x01c0003f, 0x0000003f, 0x00000000, 0 }, /* deterministic cache params, sub-leaves 0..4 */
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x3c004122, 0x01c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x3c004143, 0x01c0003f, 0x000001ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000003, UINT32_MAX, 0x3c07c163, 0x04c0003f, 0x00002fff, 0x00000006, 0 },
+ { 0x00000004, 0x00000004, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00021120, 0 }, /* MONITOR/MWAIT */
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000077, 0x00000002, 0x00000001, 0x00000000, 0 }, /* thermal & power management */
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07300403, 0x00000000, 0x00000000, 0x00000603, 0 }, /* architectural PMU capabilities */
+ { 0x0000000b, 0x00000000, UINT32_MAX, 0x00000001, 0x00000002, 0x00000100, 0x00000002, 0 }, /* extended (x2APIC) topology sub-leaves */
+ { 0x0000000b, 0x00000001, UINT32_MAX, 0x00000005, 0x0000000c, 0x00000201, 0x00000002, 0 },
+ { 0x0000000b, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000002, 0x00000002, 0 | CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED },
+ { 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000000, UINT32_MAX, 0x00000007, 0x00000340, 0x00000340, 0x00000000, 0 }, /* XSAVE features/state sizes */
+ { 0x0000000d, 0x00000001, UINT32_MAX, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000002, UINT32_MAX, 0x00000100, 0x00000240, 0x00000000, 0x00000000, 0 },
+ { 0x0000000d, 0x00000003, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 }, /* EAX=max ext leaf 0x80000008 */
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x2c100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x49202020, 0x6c65746e, 0x20295228, 0 }, /* brand string: "      Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz" over leaves 2..4 */
+ { 0x80000003, 0x00000000, 0x00000000, 0x65726f43, 0x294d5428, 0x2d376920, 0x30363933, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x50432058, 0x20402055, 0x30332e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01006040, 0x00000000, 0 }, /* L2 cache info */
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100, 0 }, /* EDX bit 8: invariant TSC */
+ { 0x80000008, 0x00000000, 0x00000000, 0x0000302e, 0x00000000, 0x00000000, 0x00000000, 0 }, /* EAX=0x302e: 46-bit physical, 48-bit linear addressing */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Core_i7_3960X[] =
+{
+ MFX(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr, Ia32P5McAddr, 0, UINT64_C(0xffffffffffffffe0), 0), /* value=0x1f */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x177ab4`48466b19 */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x8000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffc000000002ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVX(0x0000002e, "I7_UNK_0000_002e", 0, 0x400, UINT64_C(0xfffffffffffffbff)),
+ MVX(0x00000033, "TEST_CTL", 0, 0, UINT64_C(0xffffffff7fffffff)),
+ MVO(0x00000034, "P6_UNK_0000_0034", 0x4cb),
+ MFO(0x00000035, "MSR_CORE_THREAD_COUNT", IntelI7CoreThreadCount), /* value=0x6000c */
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVX(0x0000003e, "I7_UNK_0000_003e", 0x1, 0, UINT64_C(0xfffffffffffffffe)),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x61600000000), 0, UINT32_C(0xfffffffe)),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RSN(0x000000c1, 0x000000c4, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MFO(0x000000ce, "MSR_PLATFORM_INFO", IntelPlatformInfo), /* value=0xc00`70012100 */
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0, UINT64_C(0xffffffffe1ffffff)), /* value=0x1e008400 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xfffffffffff80000)), /* value=0x20414 */
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x2be98e4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2d84ced */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd0a, 0, 0), /* value=0xd0a */
+ MFN(0x00000132, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEax, IntelCpuId1FeatureMaskEax), /* value=0xffffffff`ffffffff */
+ MFN(0x00000133, "CPUIDD_01_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFN(0x00000134, "CPUID80000001_FEATURE_MASK", IntelCpuId80000001FeatureMaskEcdx, IntelCpuId80000001FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFO(0x0000013c, "I7_SB_AES_NI_CTL", IntelI7SandyAesNiCtl), /* value=0x1 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0xc12, 0, 0), /* value=0xc12 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x0 */
+ MFX(0x0000017f, "I7_SB_ERROR_CONTROL", IntelI7SandyErrorControl, IntelI7SandyErrorControl, 0, 0xc, UINT64_C(0xffffffffffffffe1)), /* value=0x0 */
+ RSN(0x00000186, 0x00000189, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00080000)),
+ MFX(0x00000194, "CLOCK_FLEX_MAX", IntelFlexRatio, IntelFlexRatio, 0xf2100, 0xe0000, UINT64_C(0xfffffffffff00000)),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x288300002400), 0, 0), /* value=0x2883`00002400 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x2700, 0, 0), /* Might bite. value=0x2700 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, 0, UINT64_C(0xfffffffffe0000e8)), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x88380000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)), /* value=0x88380000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x850089, 0x1080, UINT64_C(0xffffffbbff3aef72)), /* value=0x850089 */
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, IntelI7TemperatureTarget, 0x5b0a00, 0xffff00, UINT64_C(0xfffffffff00000ff)), /* value=0x5b0a00 */
+ MVX(0x000001a4, "I7_UNK_0000_01a4", 0, 0, UINT64_C(0xfffffffffffff7f0)),
+ RSN(0x000001a6, 0x000001a7, "I7_MSR_OFFCORE_RSP_n", IntelI7MsrOffCoreResponseN, IntelI7MsrOffCoreResponseN, 0x0, 0, UINT64_C(0xffffffc000007000)),
+ MVX(0x000001a8, "I7_UNK_0000_01a8", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFX(0x000001aa, "MSR_MISC_PWR_MGMT", IntelI7MiscPwrMgmt, IntelI7MiscPwrMgmt, 0, 0, UINT64_C(0xffffffffffbffffe)), /* value=0x400000 */
+ MFX(0x000001ad, "I7_MSR_TURBO_RATIO_LIMIT", IntelI7TurboRatioLimit, IntelI7TurboRatioLimit, UINT64_C(0x2424242425252727), 0, 0), /* value=0x24242424`25252727 */
+ MVX(0x000001b1, "IA32_PACKAGE_THERM_STATUS", UINT32_C(0x88310000), UINT32_C(0xf87f0fff), UINT64_C(0xffffffff0780f000)),
+ MVX(0x000001b2, "IA32_PACKAGE_THERM_INTERRUPT", 0, 0, UINT64_C(0xfffffffffe0000e8)),
+ MVO(0x000001c6, "I7_UNK_0000_01c6", 0x3),
+ MFX(0x000001c8, "MSR_LBR_SELECT", IntelI7LbrSelect, IntelI7LbrSelect, 0, 0, UINT64_C(0xfffffffffffffe00)), /* value=0x0 */
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, 0, UINT64_C(0xfffffffffffffff0)), /* value=0xc */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffff803c)), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0x7ffff880`093814ea */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xfffff880`093a60e0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0, UINT64_C(0x7fff800000000000)), /* value=0x0 */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MVO(0x000001e1, "I7_SB_UNK_0000_01e1", 0x2),
+ MVX(0x000001ef, "I7_SB_UNK_0000_01ef", 0xff, 0, UINT64_MAX),
+ MFO(0x000001f0, "I7_VLW_CAPABILITY", IntelI7VirtualLegacyWireCap), /* value=0x74 */
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0xad800006 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0xff800800 */
+ MFX(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap, Ia32PlatformDcaCap, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x1 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x1 */
+ MFX(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap, Ia32Dca0Cap, 0, 0x40007ff, UINT64_C(0xfffffffffafe1800)), /* value=0x1e489 */
+ MFX(0x000001fc, "I7_MSR_POWER_CTL", IntelI7PowerCtl, IntelI7PowerCtl, 0, 0, UINT64_C(0xffffffff00320020)), /* value=0x2500005b */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3ffc`00000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x4`00000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x4`40000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`f0000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xae000000 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`fe000800 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xb0000000 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`f0000800 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffc00000000ff8)), /* value=0xc0000000 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffc000000007ff)), /* value=0x3fff`c0000800 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x00000210, "IA32_MTRR_PHYS_BASE8", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x8, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x00000211, "IA32_MTRR_PHYS_MASK8", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x8, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFX(0x00000212, "IA32_MTRR_PHYS_BASE9", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x9, 0, UINT64_C(0xffffc00000000ff8)), /* value=0x0 */
+ MFX(0x00000213, "IA32_MTRR_PHYS_MASK9", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x9, 0, UINT64_C(0xffffc000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RSN(0x00000280, 0x00000281, "IA32_MC0_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x0, 0, UINT64_C(0xffffffffbfff8000)),
+ MFX(0x00000282, "IA32_MC2_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x2, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ MFX(0x00000283, "IA32_MC3_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x3, 0, UINT64_C(0xffffffffbfff8000)), /* value=0x40000001 */
+ MFX(0x00000284, "IA32_MC4_CTL2", Ia32McNCtl2, Ia32McNCtl2, 0x4, 0x40007fff, UINT64_C(0xffffffffbfff8000)), /* value=0x0 */
+ RSN(0x00000285, 0x00000287, "IA32_MC5_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x5, 0, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x00000288, 0x0000028b, "IA32_MC8_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0x8, 0x1, UINT64_C(0xffffffffbfff8000)),
+ RSN(0x0000028c, 0x00000291, "IA32_MC12_CTLn", Ia32McNCtl2, Ia32McNCtl2, 0xc, 0, UINT64_C(0xffffffffbfff8000)),
+ MVX(0x000002e0, "I7_SB_NO_EVICT_MODE", 0, 0, UINT64_C(0xfffffffffffffffc)),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVO(0x00000300, "I7_SB_UNK_0000_0300", UINT32_C(0x8000ff00)),
+ MVO(0x00000305, "I7_SB_UNK_0000_0305", 0),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x31c3, 0, 0), /* value=0x31c3 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff000)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl, 0, 0, UINT64_C(0xfffffff8fffffff0)), /* value=0xf */
+ MFX(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl, Ia32PerfGlobalOvfCtrl, 0, UINT64_C(0xe00000070000000f), UINT64_C(0x1ffffff8fffffff0)), /* value=0x0 */
+ MFX(0x0000039c, "I7_SB_MSR_PEBS_NUM_ALT", IntelI7SandyPebsNumAlt, IntelI7SandyPebsNumAlt, 0, 0, UINT64_C(0xfffffffffffffffe)), /* value=0x0 */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, 0, UINT64_C(0x7ffffff0fffffff0)), /* value=0x0 */
+ MFX(0x000003f6, "I7_MSR_PEBS_LD_LAT", IntelI7PebsLdLat, IntelI7PebsLdLat, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0xffff */
+ MFX(0x000003f8, "I7_MSR_PKG_C3_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x0 */
+ RSN(0x000003f9, 0x000003fa, "I7_MSR_PKG_Cn_RESIDENCY", IntelI7PkgCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ MFX(0x000003fc, "I7_MSR_CORE_C3_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x3, 0, UINT64_MAX), /* value=0x3f8f`5718a87c */
+ RSN(0x000003fd, 0x000003fe, "I7_MSR_CORE_Cn_RESIDENCY", IntelI7CoreCnResidencyN, ReadOnly, 0x6, 0, UINT64_MAX),
+ RFN(0x00000400, 0x00000447, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBase, ReadOnly, UINT64_C(0xda040000000010), 0, 0), /* value=0xda0400`00000010 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe0401e172), 0, 0), /* value=0xfff9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x7fffff00036dff), 0, 0), /* value=0x7fffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0xffff000011ff), 0, 0), /* value=0xffff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x100401e5, 0, 0), /* value=0x100401e5 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x627ff, 0, 0), /* value=0x627ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2a, 0, 0), /* value=0x2a */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x4ff00000000), 0, 0), /* value=0x4ff`00000000 */
+ MFX(0x0000048c, "IA32_VMX_EPT_VPID_CAP", Ia32VmxEptVpidCap, ReadOnly, UINT64_C(0xf0106134141), 0, 0), /* value=0xf01`06134141 */
+ MFX(0x0000048d, "IA32_VMX_TRUE_PINBASED_CTLS", Ia32VmxTruePinbasedCtls, ReadOnly, UINT64_C(0x7f00000016), 0, 0), /* value=0x7f`00000016 */
+ MFX(0x0000048e, "IA32_VMX_TRUE_PROCBASED_CTLS", Ia32VmxTrueProcbasedCtls, ReadOnly, UINT64_C(0xfff9fffe04006172), 0, 0), /* value=0xfff9fffe`04006172 */
+ MFX(0x0000048f, "IA32_VMX_TRUE_EXIT_CTLS", Ia32VmxTrueExitCtls, ReadOnly, UINT64_C(0x7fffff00036dfb), 0, 0), /* value=0x7fffff`00036dfb */
+ MFX(0x00000490, "IA32_VMX_TRUE_ENTRY_CTLS", Ia32VmxTrueEntryCtls, ReadOnly, UINT64_C(0xffff000011fb), 0, 0), /* value=0xffff`000011fb */
+ RSN(0x000004c1, 0x000004c4, "IA32_A_PMCn", Ia32PmcN, Ia32PmcN, 0x0, 0, UINT64_C(0xffff000000000000)),
+ MVO(0x00000502, "I7_SB_UNK_0000_0502", 0),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000601, "I7_SB_MSR_VR_CURRENT_CONFIG", IntelI7SandyVrCurrentConfig, IntelI7SandyVrCurrentConfig, 0, UINT32_C(0x80001fff), 0x7fffe000), /* value=0x141494`80000640 */
+ MFX(0x00000603, "I7_SB_MSR_VR_MISC_CONFIG", IntelI7SandyVrMiscConfig, IntelI7SandyVrMiscConfig, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80151515 */
+ MFO(0x00000606, "I7_SB_MSR_RAPL_POWER_UNIT", IntelI7SandyRaplPowerUnit), /* value=0xa1003 */
+ MFX(0x0000060a, "I7_SB_MSR_PKGC3_IRTL", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x3, 0, UINT64_C(0xffffffffffff6000)), /* value=0x0 */
+ RSN(0x0000060b, 0x0000060c, "I7_SB_MSR_PKGC6_IRTn", IntelI7SandyPkgCnIrtlN, IntelI7SandyPkgCnIrtlN, 0x6, 0, UINT64_C(0xffffffffffff6000)),
+ MFO(0x0000060d, "I7_SB_MSR_PKG_C2_RESIDENCY", IntelI7SandyPkgC2Residency), /* value=0x0 */
+ MFX(0x00000610, "I7_SB_MSR_PKG_POWER_LIMIT", IntelI7RaplPkgPowerLimit, IntelI7RaplPkgPowerLimit, 0, UINT64_C(0x80ffffff00ffffff), UINT64_C(0x7f000000ff000000)), /* value=0x80068960`005affff */
+ MFO(0x00000611, "I7_SB_MSR_PKG_ENERGY_STATUS", IntelI7RaplPkgEnergyStatus), /* value=0xc120ff02 */
+ MFO(0x00000613, "I7_SB_MSR_PKG_PERF_STATUS", IntelI7RaplPkgPerfStatus), /* value=0x0 */
+ MFO(0x00000614, "I7_SB_MSR_PKG_POWER_INFO", IntelI7RaplPkgPowerInfo), /* value=0x1a80410 */
+ MFX(0x00000618, "I7_SB_MSR_DRAM_POWER_LIMIT", IntelI7RaplDramPowerLimit, IntelI7RaplDramPowerLimit, 0, UINT32_C(0x80feffff), UINT64_C(0xffffffff7f010000)), /* value=0x80000000 */
+ MFO(0x00000619, "I7_SB_MSR_DRAM_ENERGY_STATUS", IntelI7RaplDramEnergyStatus), /* value=0x0 */
+ MFO(0x0000061b, "I7_SB_MSR_DRAM_PERF_STATUS", IntelI7RaplDramPerfStatus), /* value=0x0 */
+ MFO(0x0000061c, "I7_SB_MSR_DRAM_POWER_INFO", IntelI7RaplDramPowerInfo), /* value=0x280258`00780118 */
+ MFX(0x00000638, "I7_SB_MSR_PP0_POWER_LIMIT", IntelI7RaplPp0PowerLimit, IntelI7RaplPp0PowerLimit, 0, UINT32_C(0x80ffffff), UINT64_C(0xffffffff7f000000)), /* value=0x80000000 */
+ MFO(0x00000639, "I7_SB_MSR_PP0_ENERGY_STATUS", IntelI7RaplPp0EnergyStatus), /* value=0x448bc04 */
+ MFX(0x0000063a, "I7_SB_MSR_PP0_POLICY", IntelI7RaplPp0Policy, IntelI7RaplPp0Policy, 0, 0, UINT64_C(0xffffffffffffffe0)), /* value=0x0 */
+ MFO(0x0000063b, "I7_SB_MSR_PP0_PERF_STATUS", IntelI7RaplPp0PerfStatus), /* value=0x0 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN), /* fixed: TO_IP range must use the To-IP accessors, not IntelLastBranchFromN */
+ MFI(0x000006e0, "IA32_TSC_DEADLINE", Ia32TscDeadline), /* value=0x0 */
+ MVX(0x00000a00, "I7_SB_UNK_0000_0a00", 0, 0, UINT64_C(0xfffffffffffffec0)),
+ MVX(0x00000a01, "I7_SB_UNK_0000_0a01", 0x178fa000, 0, UINT64_C(0xffffffff00000f80)),
+ MVX(0x00000a02, "I7_SB_UNK_0000_0a02", 0, 0, UINT64_C(0xffffffff20002000)),
+ MVX(0x00000c00, "I7_SB_UNK_0000_0c00", 0, 0, UINT64_C(0xffffffffbfffff00)),
+ MVX(0x00000c01, "I7_SB_UNK_0000_0c01", 0, 0x9229fe7, UINT64_C(0xfffffffff6dd6018)),
+ MVO(0x00000c06, "I7_SB_UNK_0000_0c06", 0x6),
+ MVX(0x00000c08, "I7_SB_UNK_0000_0c08", 0, 0, UINT64_C(0xffffffffffafffff)),
+ MVX(0x00000c09, "I7_SB_UNK_0000_0c09", 0x301a, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c10, "I7_SB_UNK_0000_0c10", 0, 0x20000, UINT64_C(0xffffffffe0210000)),
+ MVX(0x00000c11, "I7_SB_UNK_0000_0c11", 0, 0x20000, UINT64_C(0xffffffffe0210000)),
+ MVX(0x00000c14, "I7_SB_UNK_0000_0c14", 0, 0, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000c15, "I7_SB_UNK_0000_0c15", 0, 0x3, UINT64_C(0xfffffffffffffffc)),
+ MVX(0x00000c16, "I7_SB_UNK_0000_0c16", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c17, "I7_SB_UNK_0000_0c17", 0, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000c24, "I7_SB_UNK_0000_0c24", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000c30, "I7_SB_UNK_0000_0c30", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c31, "I7_SB_UNK_0000_0c31", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c32, "I7_SB_UNK_0000_0c32", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c33, "I7_SB_UNK_0000_0c33", 0, 0x20000, UINT64_C(0xffffffff20013f00)),
+ MVX(0x00000c34, "I7_SB_UNK_0000_0c34", 0, 0, ~(uint64_t)UINT32_MAX),
+ MVX(0x00000c35, "I7_SB_UNK_0000_0c35", 0, 0x7f, UINT64_C(0xffffffffffffff80)),
+ MVX(0x00000c36, "I7_SB_UNK_0000_0c36", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c37, "I7_SB_UNK_0000_0c37", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c38, "I7_SB_UNK_0000_0c38", 0x20c, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000c39, "I7_SB_UNK_0000_0c39", 0x203, 0, UINT64_C(0xffff000000000000)),
+ MVX(0x00000d04, "I7_SB_UNK_0000_0d04", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d10, "I7_SB_UNK_0000_0d10", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d11, "I7_SB_UNK_0000_0d11", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d12, "I7_SB_UNK_0000_0d12", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d13, "I7_SB_UNK_0000_0d13", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d14, "I7_SB_UNK_0000_0d14", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d15, "I7_SB_UNK_0000_0d15", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d16, "I7_SB_UNK_0000_0d16", 0x81c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d17, "I7_SB_UNK_0000_0d17", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d18, "I7_SB_UNK_0000_0d18", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d19, "I7_SB_UNK_0000_0d19", 0x810, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d24, "I7_SB_UNK_0000_0d24", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d30, "I7_SB_UNK_0000_0d30", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d31, "I7_SB_UNK_0000_0d31", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d32, "I7_SB_UNK_0000_0d32", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d33, "I7_SB_UNK_0000_0d33", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d34, "I7_SB_UNK_0000_0d34", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d35, "I7_SB_UNK_0000_0d35", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d36, "I7_SB_UNK_0000_0d36", 0x864, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d37, "I7_SB_UNK_0000_0d37", 0x804, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d38, "I7_SB_UNK_0000_0d38", 0x822, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d39, "I7_SB_UNK_0000_0d39", 0x81c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d44, "I7_SB_UNK_0000_0d44", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d50, "I7_SB_UNK_0000_0d50", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d51, "I7_SB_UNK_0000_0d51", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d52, "I7_SB_UNK_0000_0d52", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d53, "I7_SB_UNK_0000_0d53", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d54, "I7_SB_UNK_0000_0d54", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d55, "I7_SB_UNK_0000_0d55", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d56, "I7_SB_UNK_0000_0d56", 0x848, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d57, "I7_SB_UNK_0000_0d57", 0x866, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d58, "I7_SB_UNK_0000_0d58", 0x83c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d59, "I7_SB_UNK_0000_0d59", 0x83c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d64, "I7_SB_UNK_0000_0d64", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d70, "I7_SB_UNK_0000_0d70", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d71, "I7_SB_UNK_0000_0d71", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d72, "I7_SB_UNK_0000_0d72", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d73, "I7_SB_UNK_0000_0d73", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d74, "I7_SB_UNK_0000_0d74", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d75, "I7_SB_UNK_0000_0d75", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d76, "I7_SB_UNK_0000_0d76", 0x846, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d77, "I7_SB_UNK_0000_0d77", 0x90c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d78, "I7_SB_UNK_0000_0d78", 0x846, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d79, "I7_SB_UNK_0000_0d79", 0x842, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d84, "I7_SB_UNK_0000_0d84", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000d90, "I7_SB_UNK_0000_0d90", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d91, "I7_SB_UNK_0000_0d91", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d92, "I7_SB_UNK_0000_0d92", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d93, "I7_SB_UNK_0000_0d93", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000d94, "I7_SB_UNK_0000_0d94", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000d95, "I7_SB_UNK_0000_0d95", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000d96, "I7_SB_UNK_0000_0d96", 0x8c6, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d97, "I7_SB_UNK_0000_0d97", 0x840, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d98, "I7_SB_UNK_0000_0d98", 0x81a, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000d99, "I7_SB_UNK_0000_0d99", 0x910, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000da4, "I7_SB_UNK_0000_0da4", 0, 0x3, UINT64_C(0xfffffffffffcfefc)),
+ MVX(0x00000db0, "I7_SB_UNK_0000_0db0", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db1, "I7_SB_UNK_0000_0db1", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db2, "I7_SB_UNK_0000_0db2", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db3, "I7_SB_UNK_0000_0db3", 0, 0x30000, UINT64_C(0xffffffff00200000)),
+ MVX(0x00000db4, "I7_SB_UNK_0000_0db4", 0x20, 0, UINT64_C(0xffffffff00000300)),
+ MVX(0x00000db5, "I7_SB_UNK_0000_0db5", 0, 0xf, UINT64_C(0xfffffffffffffff0)),
+ MVX(0x00000db6, "I7_SB_UNK_0000_0db6", 0x80c, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db7, "I7_SB_UNK_0000_0db7", 0x81e, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db8, "I7_SB_UNK_0000_0db8", 0x810, 0, UINT64_C(0xfffff00000000000)),
+ MVX(0x00000db9, "I7_SB_UNK_0000_0db9", 0x80a, 0, UINT64_C(0xfffff00000000000)),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`030dac00 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`030da940 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`061e6000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, 0, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Core_i7_3960X =
+{
+ /*.pszName = */ "Intel Core i7-3960X",
+ /*.pszFullName = */ "Intel(R) Core(TM) i7-3960X CPU @ 3.30GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6, /* family/model/stepping decode CPUID.01h EAX=0x000206d6 above */
+ /*.uModel = */ 45,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_100MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 46, /* CPUID.80000008h EAX[7:0] = 0x2e */
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Core_i7_3960X),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Core_i7_3960X)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX, /* unknown leaves answer like the last std leaf (0xd) */
+ /*.DefUnknownCpuId = */ { 0x00000007, 0x00000340, 0x00000340, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Core_i7_3960X)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Core_i7_3960X),
+};
+
+#endif /* !VBOX_DB_Intel_Core_i7_3960X */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h
new file mode 100644
index 00000000..d272330e
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_4_3_00GHz.h
@@ -0,0 +1,273 @@
+/* $Id: Intel_Pentium_4_3_00GHz.h $ */
+/** @file
+ * CPU database entry "Intel Pentium 4 3.00GHz".
+ * Generated at 2013-12-18T06:37:54Z by VBoxCpuReport v4.3.53r91376 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Pentium_4_3_00GHz
+#define VBOX_CPUDB_Intel_Pentium_4_3_00GHz
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Pentium(R) 4 CPU 3.00GHz.
+ *
+ * Generated raw CPUID dump.  Columns appear to be: leaf, sub-leaf,
+ * sub-leaf mask, EAX, EBX, ECX, EDX, flags (matches CPUMCPUIDLEAF —
+ * verify against the CPUM headers).  Leaf 0 EBX/ECX/EDX spell
+ * "Genu"/"ntel"/"ineI"; leaf 1 EAX 0x00000f43 encodes family 0xf,
+ * model 4, stepping 3.  Keep the table sorted by leaf/sub-leaf.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000005, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00000f43, 0x00020800, 0x0000649d, 0xbfebfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x605b5001, 0x00000000, 0x00000000, 0x007d7040, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ /* Leaf 4 has sub-leaves (cache topology); the UINT32_MAX mask marks ECX-indexed entries. */
+ { 0x00000004, 0x00000000, UINT32_MAX, 0x00004121, 0x01c0003f, 0x0000001f, 0x00000000, 0 },
+ { 0x00000004, 0x00000001, UINT32_MAX, 0x00004143, 0x01c0103f, 0x000007ff, 0x00000000, 0 },
+ { 0x00000004, 0x00000002, UINT32_MAX, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000000, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x20100800, 0 },
+ /* Leaves 0x80000002-4: processor brand string fragments. */
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x6e492020, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x286c6574, 0x50202952, 0x69746e65, 0x52286d75, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20342029, 0x20555043, 0x30302e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08006040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Pentium(R) 4 CPU 3.00GHz.
+ */
+/* NOTE: MFO/MFN/MFX/MFS/MFV/MFZ/MVX/RFN are CPUM MSR-range-entry macros supplied by the
+ * including file (CPUMR3Db.cpp).  The table must stay sorted by MSR index. */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Pentium_4_3_00GHz[] =
+{
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0xc55df88 */
+ MFO(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0xbe000300`1008081f */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x1ac`2077a134 */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x12000000000000)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0x600, UINT64_C(0xffffff00000000ff)),
+ MFX(0x0000002a, "P4_EBC_HARD_POWERON", IntelP4EbcHardPowerOn, IntelP4EbcHardPowerOn, 0, UINT64_MAX, 0), /* value=0x0 */
+ MFX(0x0000002b, "P4_EBC_SOFT_POWERON", IntelP4EbcSoftPowerOn, IntelP4EbcSoftPowerOn, 0x7e, UINT64_C(0xffffffffffffff80), 0), /* value=0x7e */
+ MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0), /* value=0xf12010f */
+ MVX(0x00000039, "C2_UNK_0000_0039", 0x1, 0x1f, ~(uint64_t)UINT32_MAX),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x00000080, "P4_UNK_0000_0080", 0, ~(uint64_t)UINT32_MAX, UINT32_MAX),
+ MFX(0x0000008b, "IA32_BIOS_SIGN_ID", Ia32BiosSignId, Ia32BiosSignId, 0, UINT32_MAX, 0), /* value=0x5`00000000 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000119, "BBL_CR_CTL", IntelBblCrCtl, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x180204, 0, 0), /* value=0x180204 */
+ MFN(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus), /* value=0x0 */
+ MVX(0x00000180, "MSR_MCG_RAX", 0, 0, UINT64_MAX),
+ MVX(0x00000181, "MSR_MCG_RBX", 0, 0, UINT64_MAX),
+ MVX(0x00000182, "MSR_MCG_RCX", 0, 0, UINT64_MAX),
+ MVX(0x00000183, "MSR_MCG_RDX", 0, 0, UINT64_MAX),
+ MVX(0x00000184, "MSR_MCG_RSI", 0, 0, UINT64_MAX),
+ MVX(0x00000185, "MSR_MCG_RDI", 0, 0, UINT64_MAX),
+ MFX(0x00000186, "MSR_MCG_RBP", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000187, "MSR_MCG_RSP", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0, 0, UINT64_MAX), /* value=0x0 */
+ MVX(0x00000188, "MSR_MCG_RFLAGS", 0, 0, UINT64_MAX),
+ MVX(0x00000189, "MSR_MCG_RIP", 0, 0, UINT64_MAX),
+ MVX(0x0000018a, "MSR_MCG_MISC", 0, 0, UINT64_MAX),
+ MVX(0x0000018b, "MSR_MCG_RESERVED1", 0, 0, UINT64_MAX),
+ MVX(0x0000018c, "MSR_MCG_RESERVED2", 0, 0, UINT64_MAX),
+ MVX(0x0000018d, "MSR_MCG_RESERVED3", 0, 0, UINT64_MAX),
+ MVX(0x0000018e, "MSR_MCG_RESERVED4", 0, 0, UINT64_MAX),
+ MVX(0x0000018f, "MSR_MCG_RESERVED5", 0, 0, UINT64_MAX),
+ MVX(0x00000190, "MSR_MCG_R8", 0, 0, UINT64_MAX),
+ MVX(0x00000191, "MSR_MCG_R9", 0, 0, UINT64_MAX),
+ MVX(0x00000192, "MSR_MCG_R10", 0, 0, UINT64_MAX),
+ MVX(0x00000193, "MSR_MCG_R11", 0, 0, UINT64_MAX),
+ MVX(0x00000194, "MSR_MCG_R12", 0, 0, UINT64_MAX),
+ MVX(0x00000195, "MSR_MCG_R13", 0, 0, UINT64_MAX),
+ MVX(0x00000196, "MSR_MCG_R14", 0, 0, UINT64_MAX),
+ MVX(0x00000197, "MSR_MCG_R15", 0, 0, UINT64_MAX),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, Ia32PerfStatus, UINT64_C(0xf2d00000f2d), UINT64_MAX, 0), /* value=0xf2d`00000f2d */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0xf2d, 0, 0), /* Might bite. value=0xf2d */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0, UINT64_C(0xffffffffffffffe1), 0), /* value=0x0 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0, UINT64_C(0xfffffffffffffff5), 0), /* value=0x0 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0xe2d, 0, 0), /* value=0xe2d */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x0000019f, "P6_UNK_0000_019f", UINT64_C(0x32050500000101), UINT64_C(0xff000000fff0c0c0), 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x22850089, 0x20800080, UINT64_C(0xfffffffbdc10f800)), /* value=0x22850089 */
+ MVX(0x000001a1, "MSR_PLATFORM_BRV", 0, UINT64_C(0xfffffffffffcc0c0), 0),
+ MFX(0x000001a2, "P4_UNK_0000_01a2", IntelI7TemperatureTarget, ReadOnly, 0x61048, 0, 0), /* value=0x61048 */
+ MFO(0x000001d7, "MSR_LER_FROM_LIP", P6LastIntFromIp), /* value=0x0 */
+ MFO(0x000001d8, "MSR_LER_TO_LIP", P6LastIntToIp), /* value=0x0 */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffff80)), /* value=0x0 */
+ MFX(0x000001da, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffff0), 0), /* value=0x0 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x3f600000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`ffe00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x3f800000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffff00000007ff)), /* value=0xf`ff800800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffff0000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffff00000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ MVX(0x00000300, "P4_MSR_BPU_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000301, "P4_MSR_BPU_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000302, "P4_MSR_BPU_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000303, "P4_MSR_BPU_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000304, "P4_MSR_MS_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000305, "P4_MSR_MS_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000306, "P4_MSR_MS_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000307, "P4_MSR_MS_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000308, "P4_MSR_FLAME_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000309, "P4_MSR_FLAME_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030a, "P4_MSR_FLAME_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030b, "P4_MSR_FLAME_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030c, "P4_MSR_IQ_COUNTER0", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030d, "P4_MSR_IQ_COUNTER1", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030e, "P4_MSR_IQ_COUNTER2", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x0000030f, "P4_MSR_IQ_COUNTER3", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000310, "P4_MSR_IQ_COUNTER4", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000311, "P4_MSR_IQ_COUNTER5", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00000360, "P4_MSR_BPU_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000361, "P4_MSR_BPU_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000362, "P4_MSR_BPU_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000363, "P4_MSR_BPU_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000364, "P4_MSR_MS_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000365, "P4_MSR_MS_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000366, "P4_MSR_MS_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000367, "P4_MSR_MS_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000368, "P4_MSR_FLAME_CCCR0", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x00000369, "P4_MSR_FLAME_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036a, "P4_MSR_FLAME_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036b, "P4_MSR_FLAME_CCCR3", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036c, "P4_MSR_IQ_CCCR0", 0, UINT64_C(0xffffffff000007ff), 0),
+ MVX(0x0000036d, "P4_MSR_IQ_CCCR1", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036e, "P4_MSR_IQ_CCCR2", 0, UINT64_C(0xffffffff00000fff), 0),
+ MVX(0x0000036f, "P4_MSR_IQ_CCCR3", 0, UINT64_C(0xffffffff000007ff), 0),
+ MVX(0x00000370, "P4_MSR_IQ_CCCR4", 0, UINT64_C(0xffffffff000000ff), 0),
+ MVX(0x00000371, "P4_MSR_IQ_CCCR5", 0, UINT64_C(0xffffffff000000ff), 0),
+ MVX(0x000003a0, "P4_MSR_BSU_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a1, "P4_MSR_BSU_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a2, "P4_MSR_FSB_ESCR0", 0, UINT64_C(0xffffffff40000000), UINT32_C(0x80000000)),
+ MVX(0x000003a3, "P4_MSR_FSB_ESCR1", 0, UINT64_C(0xffffffff40000000), UINT32_C(0x80000000)),
+ MVX(0x000003a4, "P4_MSR_FIRM_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a5, "P4_MSR_FIRM_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a6, "P4_MSR_FLAME_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a7, "P4_MSR_FLAME_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003a8, "P4_MSR_DAC_ESCR0", 0, UINT64_C(0xffffffff61fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003a9, "P4_MSR_DAC_ESCR1", 0, UINT64_C(0xffffffff61fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003aa, "P4_MSR_MOB_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ab, "P4_MSR_MOB_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ac, "P4_MSR_PMH_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ad, "P4_MSR_PMH_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003ae, "P4_MSR_SAAT_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003af, "P4_MSR_SAAT_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003b0, "P4_MSR_U2L_ESCR0", 0, UINT64_C(0xffffffff71c001f0), UINT32_C(0x80000000)),
+ MVX(0x000003b1, "P4_MSR_U2L_ESCR1", 0, UINT64_C(0xffffffff71c001f0), UINT32_C(0x80000000)),
+ MVX(0x000003b2, "P4_MSR_BPU_ESCR0", 0, UINT64_C(0xffffffff61fc0000), UINT32_C(0x80000000)),
+ MVX(0x000003b3, "P4_MSR_BPU_ESCR1", 0, UINT64_C(0xffffffff61fc0000), UINT32_C(0x80000000)),
+ MVX(0x000003b4, "P4_MSR_IS_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b5, "P4_MSR_IS_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b6, "P4_MSR_ITLB_ESCR0", 0, UINT64_C(0xffffffff0ffff1e0), UINT32_C(0x80000000)),
+ MVX(0x000003b7, "P4_MSR_ITLB_ESCR1", 0, UINT64_C(0xffffffff0ffff1e0), UINT32_C(0x80000000)),
+ MVX(0x000003b8, "P4_MSR_CRU_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003b9, "P4_MSR_CRU_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003ba, "P4_MSR_IQ_ESCR0", 0, UINT64_C(0xffffffff7fffffff), UINT32_C(0x80000000)),
+ MVX(0x000003bb, "P4_MSR_IQ_ESCR1", 0, UINT64_C(0xffffffff7fffffff), UINT32_C(0x80000000)),
+ MVX(0x000003bc, "P4_MSR_RAT_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003bd, "P4_MSR_RAT_ESCR1", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003be, "P4_MSR_SSU_ESCR0", 0, ~(uint64_t)UINT32_MAX, UINT32_C(0x80000000)),
+ MVX(0x000003c0, "P4_MSR_MS_ESCR0", 0, UINT64_C(0xffffffff61ff81e0), UINT32_C(0x80000000)),
+ MVX(0x000003c1, "P4_MSR_MS_ESCR1", 0, UINT64_C(0xffffffff61ff81e0), UINT32_C(0x80000000)),
+ MVX(0x000003c2, "P4_MSR_TBPU_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003c3, "P4_MSR_TBPU_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003c4, "P4_MSR_TC_ESCR0", 0, UINT64_C(0xffffffff61f801f0), UINT32_C(0x80000000)),
+ MVX(0x000003c5, "P4_MSR_TC_ESCR1", 0, UINT64_C(0xffffffff61f801f0), UINT32_C(0x80000000)),
+ MVX(0x000003c8, "P4_MSR_IX_ESCR0", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ /* Fixed: 0x3c9 was duplicated as "P4_MSR_IX_ESCR0"; it is IX_ESCR1 per Intel SDM vol. 4 and the ESCR0/ESCR1 pattern of every other pair in this table. */
+ MVX(0x000003c9, "P4_MSR_IX_ESCR1", 0, UINT64_C(0xffffffff71fe01f0), UINT32_C(0x80000000)),
+ MVX(0x000003ca, "P4_MSR_ALF_ESCR0", 0, UINT64_C(0xffffffff700001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cb, "P4_MSR_ALF_ESCR1", 0, UINT64_C(0xffffffff700001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cc, "P4_MSR_CRU_ESCR2", 0, UINT64_C(0xffffffff61f001f0), UINT32_C(0x80000000)),
+ MVX(0x000003cd, "P4_MSR_CRU_ESCR3", 0, UINT64_C(0xffffffff61f001f0), UINT32_C(0x80000000)),
+ MVX(0x000003e0, "P4_MSR_CRU_ESCR4", 0, UINT64_C(0xffffffff71ff01f0), UINT32_C(0x80000000)),
+ MVX(0x000003e1, "P4_MSR_CRU_ESCR5", 0, UINT64_C(0xffffffff71ff01f0), UINT32_C(0x80000000)),
+ MVX(0x000003f0, "P4_MSR_TC_PRECISE_EVENT", 0xfc00, UINT64_C(0xfffffffffffc001f), 0),
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, UINT64_C(0xfffffffff8000000), 0), /* value=0x0 */
+ MVX(0x000003f2, "P4_MSR_PEBS_MATRIX_VERT", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0x000003f5, "P4_UNK_0000_03f5", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x000003f6, "P4_UNK_0000_03f6", 0, UINT64_C(0xffffffffffe00000), 0),
+ MVX(0x000003f7, "P4_UNK_0000_03f7", 0, UINT64_C(0xfffe000000000000), 0),
+ MVX(0x000003f8, "P4_UNK_0000_03f8", 0, UINT64_C(0xffffff000000003f), 0),
+ RFN(0x00000400, 0x0000040f, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ RFN(0x00000680, 0x0000068f, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ RFN(0x000006c0, 0x000006cf, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchToN, IntelLastBranchToN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`654efdc0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`654efb00 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xeed1e000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffff880`009bf000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7f7`eed1c000 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Pentium(R) 4 CPU 3.00GHz.
+ *
+ * Generated data: ties the CPUID leaf table and MSR range table above into
+ * one CPUMDBENTRY.  Family/model/stepping 15/4/3 match CPUID leaf 1 EAX
+ * (0x00000f43) in the leaf table.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Pentium_4_3_00GHz =
+{
+ /*.pszName = */ "Intel Pentium 4 3.00GHz",
+ /*.pszFullName = */ "Intel(R) Pentium(R) 4 CPU 3.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 15,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 3,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_NB_Prescott2M,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Pentium_4_3_00GHz)),
+ /* NOTE(review): "UKNOWN" is the enum's actual (misspelled) name in the CPUM headers; do not "correct" it here. */
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x00000040, 0x00000040, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Pentium_4_3_00GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Pentium_4_3_00GHz),
+};
+
+#endif /* !VBOX_DB_Intel_Pentium_4_3_00GHz */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h
new file mode 100644
index 00000000..fe287df1
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Pentium_M_processor_2_00GHz.h
@@ -0,0 +1,212 @@
+/* $Id: Intel_Pentium_M_processor_2_00GHz.h $ */
+/** @file
+ * CPU database entry "Intel Pentium M processor 2.00GHz".
+ * Generated at 2013-12-09T14:18:00Z by VBoxCpuReport v4.3.51r91027 on win.x86.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
+#define VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Pentium(R) M processor 2.00GHz.
+ *
+ * Generated raw CPUID dump.  Columns appear to be: leaf, sub-leaf,
+ * sub-leaf mask, EAX, EBX, ECX, EDX, flags (matches CPUMCPUIDLEAF —
+ * verify against the CPUM headers).  Leaf 0 EBX/ECX/EDX spell
+ * "Genu"/"ntel"/"ineI"; leaf 1 EAX 0x000006d6 encodes family 6,
+ * model 13, stepping 6.  Leaves 0x80000002-4 hold the brand string.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000002, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006d6, 0x00000816, 0x00000180, 0xafe9f9bf, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x02b3b001, 0x000000f0, 0x00000000, 0x2c04307d, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000004, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x65746e49, 0x2952286c, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x6e655020, 0x6d756974, 0x20295228, 0x7270204d, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x7365636f, 0x20726f73, 0x30302e32, 0x007a4847, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Pentium(R) M processor 2.00GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz[] =
+{
+ MFI(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x0 */
+ MFI(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType), /* value=0x0 */
+ MFX(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x22`4d44782e */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x140000d0248a28)),
+ MVX(0x00000018, "P6_UNK_0000_0018", 0, 0, 0),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00100), UINT64_C(0xffffffff00000600), 0xff),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x45080000, UINT64_C(0xfffffffffff7ff7e), 0), /* value=0x45080000 */
+ MVX(0x0000002f, "P6_UNK_0000_002f", 0, UINT64_C(0xfffffffffffffff5), 0),
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xfffffffffffe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff40000000), 0),
+ MVX(0x00000034, "P6_UNK_0000_0034", 0x77ff, ~(uint64_t)UINT32_MAX, UINT32_C(0xfff80000)),
+ MVO(0x00000035, "P6_UNK_0000_0035", 0x300008),
+ MVX(0x0000003b, "P6_UNK_0000_003b", 0, UINT64_C(0xafffffffe), UINT64_C(0xfffffff500000001)),
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0x4),
+ RFN(0x00000040, 0x00000047, "MSR_LASTBRANCH_n", IntelLastBranchFromToN, ReadOnly),
+ MVX(0x0000004a, "P6_UNK_0000_004a", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004b, "P6_UNK_0000_004b", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004c, "P6_UNK_0000_004c", 0, 0, 0), /* value=0x0 */
+ MVX(0x0000004d, "P6_UNK_0000_004d", 0, 0, 0), /* value=0xeb1cffbf`8918200a */
+ MVX(0x0000004e, "P6_UNK_0000_004e", 0, 0, 0), /* value=0x8204c60a`e8009512 */
+ MVX(0x0000004f, "P6_UNK_0000_004f", 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVI(0x00000050, "P6_UNK_0000_0050", 0), /* Villain? value=0x0 */
+ MVI(0x00000051, "P6_UNK_0000_0051", 0), /* Villain? value=0x0 */
+ MVI(0x00000052, "P6_UNK_0000_0052", 0), /* Villain? value=0x0 */
+ MVI(0x00000053, "P6_UNK_0000_0053", 0), /* Villain? value=0x0 */
+ MVI(0x00000054, "P6_UNK_0000_0054", 0), /* Villain? value=0x0 */
+ MVX(0x0000006c, "P6_UNK_0000_006c", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006d, "P6_UNK_0000_006d", 0, UINT64_C(0xffffffff00000082), 0),
+ MVX(0x0000006e, "P6_UNK_0000_006e", 0, UINT64_C(0xffffffff00000082), 0),
+ MVO(0x0000006f, "P6_UNK_0000_006f", 0xadb),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, Ia32BiosUpdateTrigger),
+ MVX(0x00000088, "BBL_CR_D0", 0, 0, 0), /* value=0xfcaeffff`d779fd3e */
+ MVX(0x00000089, "BBL_CR_D1", 0, 0, 0), /* value=0xefffbcb7`ff77fbef */
+ MVX(0x0000008a, "BBL_CR_D2", 0, 0, 0), /* value=0xdfff3f2f`fb367d9f */
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x1800000000), 0, 0),
+ MVX(0x0000008c, "P6_UNK_0000_008c", 0, 0, 0), /* value=0xeffff3ff`ef39bfff */
+ MVX(0x0000008d, "P6_UNK_0000_008d", 0, 0, 0), /* value=0xf773adfb`ef3ff3fc */
+ MVX(0x0000008e, "P6_UNK_0000_008e", 0, 0, 0), /* value=0xfeb7f6ff`ebbffeff */
+ MVX(0x0000008f, "P6_UNK_0000_008f", 0, 0, 0), /* value=0xd6ffb7af`ffad9e7e */
+ MVX(0x00000090, "P6_UNK_0000_0090", 0, UINT64_C(0xfffffffffffffffa), 0), /* value=0x9ebdb4b5 */
+ MVX(0x000000ae, "P6_UNK_0000_00ae", UINT64_C(0x1000000000007efc), 0, 0),
+ MFX(0x000000c1, "IA32_PMC0", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000c2, "IA32_PMC1", Ia32PmcN, Ia32PmcN, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x5a000000ac000000)),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0),
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x2812140000000000)),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MVX(0x00000116, "BBL_CR_ADDR", UINT32_C(0xfe7efff0), UINT64_C(0xffffffff0000000f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", UINT64_C(0xc0000000c1ae9fda), UINT64_C(0xfffffff00000000), 0),
+ MFX(0x00000119, "BBL_CR_CTL", IntelBblCrCtl, IntelBblCrCtl, 0x8, UINT64_C(0xffffffffc00001ff), 0), /* value=0x8 */
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0x34272b, UINT64_C(0xfffffffffffbfc1f), 0), /* value=0x34272b */
+ MVI(0x00000131, "P6_UNK_0000_0131", 0),
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0xd31f40, UINT64_C(0xfffffffff000008f), 0),
+ MVI(0x0000014f, "P6_UNK_0000_014f", 0xd31f40),
+ MVX(0x00000150, "P6_UNK_0000_0150", 0, UINT64_C(0xffffffffdfffe07f), 0x20000000),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x3c531fc6, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00000154, "P6_UNK_0000_0154", 0),
+ MVX(0x0000015b, "P6_UNK_0000_015b", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x8 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xf78af000 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x804de6f0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x5, 0, 0), /* value=0x5 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0, UINT64_C(0xfffffffffffee0c0), 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x612142806000612), 0, 0), /* value=0x6121428`06000612 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x612, 0, 0), /* Might bite. value=0x612 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0, UINT64_C(0xfffffffffffffffd), 0), /* value=0x0 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, Ia32Therm2Ctl, 0x10612, UINT64_C(0xfffffffffffee0c0), 0), /* value=0x10612 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x111088, UINT64_C(0xffffffff001ffb77), 0), /* value=0x111088 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", 0x3, UINT64_C(0xfffffffffffffffc), 0),
+ MVX(0x000001ae, "P6_UNK_0000_01ae", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x000001af, "P6_UNK_0000_01af", 0x3ff, UINT64_C(0xfffffffffffffc00), 0),
+ MVO(0x000001c9, "TODO_0000_01c9", 0x8000000),
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffc200), 0), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xaad05fa1 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xaad06480 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x7dba1245 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x806f5d54 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`c0000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x40000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`e0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x5ff80000 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`fff80800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, UINT64_C(0xf00000000), UINT64_C(0xfffffff000000fff)), /* value=0xf`00000000 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000413, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0x00001000, "P6_DEBUG_REGISTER_0", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001001, "P6_DEBUG_REGISTER_1", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001002, "P6_DEBUG_REGISTER_2", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001003, "P6_DEBUG_REGISTER_3", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001004, "P6_DEBUG_REGISTER_4", UINT32_C(0xffff0ff0), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001005, "P6_DEBUG_REGISTER_5", 0x400, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001006, "P6_DEBUG_REGISTER_6", UINT32_C(0xffff0ff0)), /* Villain? */
+ MVI(0x00001007, "P6_DEBUG_REGISTER_7", 0x400), /* Villain? */
+ MVO(0x0000103f, "P6_UNK_0000_103f", 0x4),
+ MVO(0x000010cd, "P6_UNK_0000_10cd", 0),
+ MFW(0x00002000, "P6_CR0", IntelP6CrN, IntelP6CrN, UINT64_C(0xffffffff00000010)), /* value=0x8001003b */
+ MFX(0x00002002, "P6_CR2", IntelP6CrN, IntelP6CrN, 0x2, ~(uint64_t)UINT32_MAX, 0), /* value=0xc30000 */
+ MFX(0x00002003, "P6_CR3", IntelP6CrN, IntelP6CrN, 0x3, ~(uint64_t)UINT32_MAX, 0), /* value=0x29765000 */
+ MFX(0x00002004, "P6_CR4", IntelP6CrN, IntelP6CrN, 0x4, ~(uint64_t)UINT32_MAX, 0), /* value=0x6d9 */
+ MVO(0x0000203f, "P6_UNK_0000_203f", 0x4),
+ MVO(0x000020cd, "P6_UNK_0000_20cd", 0),
+ MVO(0x0000303f, "P6_UNK_0000_303f", 0x4),
+ MVO(0x000030cd, "P6_UNK_0000_30cd", 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Pentium(R) M processor 2.00GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Pentium_M_processor_2_00GHz =
+{
+ /*.pszName = */ "Intel Pentium M processor 2.00GHz",
+ /*.pszFullName = */ "Intel(R) Pentium(R) M processor 2.00GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 13,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_P6_M_Dothan,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 32,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Pentium_M_processor_2_00GHz)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x02b3b001, 0x000000f0, 0x00000000, 0x2c04307d },
+ /*.fMsrMask = */ UINT32_C(0x3fff),
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Pentium_M_processor_2_00GHz),
+};
+
+#endif /* !VBOX_DB_Intel_Pentium_M_processor_2_00GHz */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h b/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h
new file mode 100644
index 00000000..45acf494
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Intel_Xeon_X5482_3_20GHz.h
@@ -0,0 +1,241 @@
+/* $Id: Intel_Xeon_X5482_3_20GHz.h $ */
+/** @file
+ * CPU database entry "Intel Xeon X5482 3.20GHz".
+ * Generated at 2013-12-16T12:10:52Z by VBoxCpuReport v4.3.53r91299 on darwin.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
+#define VBOX_CPUDB_Intel_Xeon_X5482_3_20GHz
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00010676, 0x04040800, 0x000ce3bd, 0xbfebfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x05b0b101, 0x005657f0, 0x00000000, 0x2cb4304e, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x0c000121, 0x01c0003f, 0x0000003f, 0x00000001, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00002220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000001, 0x00000002, 0x00000001, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000400, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x07280202, 0x00000000, 0x00000000, 0x00000503, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x65746e49, 0x2952286c, 0x6f655820, 0x2952286e, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x55504320, 0x20202020, 0x20202020, 0x58202020, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x32383435, 0x20402020, 0x30322e33, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x18008040, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003026, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Intel_Xeon_X5482_3_20GHz[] =
+{
+ MFO(0x00000000, "IA32_P5_MC_ADDR", Ia32P5McAddr), /* value=0x610010 */
+ MFX(0x00000001, "IA32_P5_MC_TYPE", Ia32P5McType, Ia32P5McType, 0, 0, UINT64_MAX), /* value=0x0 */
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, 0, UINT64_C(0xffffffffffff0000)), /* value=0x40 */
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x1358`d28c2c60 */
+ MFV(0x00000017, "IA32_PLATFORM_ID", Ia32PlatformId, ReadOnly, UINT64_C(0x18000088e40822)),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffffffc0000006ff)),
+ MVX(0x00000021, "C2_UNK_0000_0021", 0, 0, UINT64_C(0xffffffffffffffc0)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, UINT32_C(0xc2383400), UINT64_C(0xffffffffdff7df00), 0), /* value=0xc2383400 */
+ MVX(0x00000032, "P6_UNK_0000_0032", 0, UINT64_C(0xffffffff01fe0000), 0),
+ MVX(0x00000033, "TEST_CTL", 0, UINT64_C(0xffffffff7fffffff), 0),
+ MVO(0x00000039, "C2_UNK_0000_0039", 0x7),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ MVO(0x0000003f, "P6_UNK_0000_003f", 0),
+ RFN(0x00000040, 0x00000043, "MSR_LASTBRANCH_n_FROM_IP", IntelLastBranchToN, IntelLastBranchToN),
+ RFN(0x00000060, 0x00000063, "MSR_LASTBRANCH_n_TO_IP", IntelLastBranchFromN, IntelLastBranchFromN),
+ MFN(0x00000079, "IA32_BIOS_UPDT_TRIG", WriteOnly, IgnoreWrite),
+ MVX(0x0000008b, "BBL_CR_D3|BIOS_SIGN", UINT64_C(0x60b00000000), UINT32_MAX, 0),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ MFX(0x000000a8, "C2_EMTTM_CR_TABLES_0", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000a9, "C2_EMTTM_CR_TABLES_1", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000aa, "C2_EMTTM_CR_TABLES_2", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ab, "C2_EMTTM_CR_TABLES_3", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ac, "C2_EMTTM_CR_TABLES_4", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, UINT64_C(0xffffffffffff8000), 0), /* value=0x612 */
+ MFX(0x000000ad, "C2_EMTTM_CR_TABLES_5", IntelCore2EmttmCrTablesN, IntelCore2EmttmCrTablesN, 0x612, ~(uint64_t)UINT32_MAX, 0), /* value=0x612 */
+ RSN(0x000000c1, 0x000000c2, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x000000c7, "P6_UNK_0000_00c7", UINT64_C(0x2300000052000000)),
+ MFX(0x000000cd, "P6_MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0x806, 0, 0),
+ MVO(0x000000ce, "P6_UNK_0000_00ce", UINT64_C(0x1208227f7f0710)),
+ MVO(0x000000cf, "C2_UNK_0000_00cf", 0),
+ MVO(0x000000e0, "C2_UNK_0000_00e0", 0x18820f0),
+ MVO(0x000000e1, "C2_UNK_0000_00e1", UINT32_C(0xf0f00000)),
+ MFX(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl, IntelPkgCStConfigControl, 0, 0x404000, UINT64_C(0xfffffffffc001000)), /* value=0x202a01 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, 0, UINT64_C(0xffffffffff800000)), /* value=0x0 */
+ MVO(0x000000e5, "C2_UNK_0000_00e5", UINT32_C(0xd00208c8)),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x40`a0a41c60 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x3a`cc470b98 */
+ MFX(0x000000ee, "C1_EXT_CONFIG", IntelCore1ExtConfig, IntelCore1ExtConfig, 0, UINT64_C(0xffffffffefc5ffff), 0), /* value=0x4000000`877d4b01 */
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ MVX(0x00000116, "BBL_CR_ADDR", 0x3fc0, UINT64_C(0xffffff000000001f), 0),
+ MVX(0x00000118, "BBL_CR_DECC", 0xa7f99, UINT64_C(0xfffffffffff00000), 0),
+ MFN(0x0000011a, "BBL_CR_TRIG", WriteOnly, IgnoreWrite),
+ MVI(0x0000011b, "P6_UNK_0000_011b", 0),
+ MVX(0x0000011c, "C2_UNK_0000_011c", UINT32_C(0xe003b94d), UINT64_C(0xffffffff07c00000), 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, UINT32_C(0xbe702111), UINT64_C(0xfffffffffef3fe9f), 0), /* value=0xbe702111 */
+ MVX(0x0000014e, "P6_UNK_0000_014e", 0x70375245, UINT64_C(0xffffffff00000080), 0),
+ MVI(0x0000014f, "P6_UNK_0000_014f", UINT32_C(0xffffba7f)),
+ MVX(0x00000151, "P6_UNK_0000_0151", 0x6b929082, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x0000015e, "C2_UNK_0000_015e", 0x6, 0, UINT64_C(0xfffffffffffffff0)),
+ MFX(0x0000015f, "C1_DTS_CAL_CTRL", IntelCore1DtsCalControl, IntelCore1DtsCalControl, 0, UINT64_C(0xffffffffffc0ffff), 0), /* value=0x822 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0xb */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff82`0dce9190 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffff80`0d2ce720 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x806, 0, 0), /* value=0x806 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0x00000186, 0x00000187, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, 0, UINT64_C(0xffffffff00200000)),
+ MVO(0x00000193, "C2_UNK_0000_0193", 0),
+ MVX(0x00000194, "CLOCK_FLEX_MAX", 0x14822, UINT64_C(0xfffffffffffea0c0), 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, ReadOnly, UINT64_C(0x822082206300622), 0, 0), /* value=0x8220822`06300622 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x822, 0, 0), /* Might bite. value=0x822 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, 0, UINT64_C(0xffffffffffffffe1)), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0x10, 0, UINT64_C(0xffffffffff0000e0)), /* value=0x10 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, UINT32_C(0x883c0000), UINT32_C(0xf87f017f), UINT64_C(0xffffffff0780fc00)), /* value=0x883c0000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x612, 0, 0), /* value=0x612 */
+ MVX(0x0000019e, "P6_UNK_0000_019e", 0x2120000, UINT64_C(0xffffffffffff0000), 0),
+ MVI(0x0000019f, "P6_UNK_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, UINT64_C(0x4066a52489), UINT64_C(0x52600099f6), UINT64_C(0xffffff0019004000)), /* value=0x40`66a52489 */
+ MVX(0x000001a1, "P6_UNK_0000_01a1", 0, UINT64_C(0xffff000000000000), 0),
+ MFX(0x000001a2, "I7_MSR_TEMPERATURE_TARGET", IntelI7TemperatureTarget, ReadOnly, 0x1400, 0, 0), /* value=0x1400 */
+ MVX(0x000001aa, "P6_PIC_SENS_CFG", UINT32_C(0xfe7f042f), UINT64_C(0xffffffff7faf00af), 0),
+ MVX(0x000001bf, "C2_UNK_0000_01bf", 0x404, UINT64_C(0xffffffffffff0000), 0),
+ MFX(0x000001c9, "MSR_LASTBRANCH_TOS", IntelLastBranchTos, IntelLastBranchTos, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ MVX(0x000001d3, "P6_UNK_0000_01d3", 0x8000, UINT64_C(0xffffffffffff7fff), 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffa03c)), /* value=0x1 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xffffff7f`8f47ca6b */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xffffff80`0d2b24c0 */
+ MFX(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp, P6LastIntFromIp, 0, 0x1, UINT64_C(0xffff800000000000)), /* value=0xffffff80`0d2ba20f */
+ MFX(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp, P6LastIntToIp, 0, 0x10, UINT64_C(0xffff800000000000)), /* value=0xffffff80`0d2ba200 */
+ MVO(0x000001e0, "MSR_ROB_CR_BKUPTMPDR6", 0xff0),
+ MFX(0x000001f8, "IA32_PLATFORM_DCA_CAP", Ia32PlatformDcaCap, Ia32PlatformDcaCap, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ MFO(0x000001f9, "IA32_CPU_DCA_CAP", Ia32CpuDcaCap), /* value=0x1 */
+ MFX(0x000001fa, "IA32_DCA_0_CAP", Ia32Dca0Cap, Ia32Dca0Cap, 0, UINT64_C(0xfffffffffefe17ff), 0), /* value=0xc01e489 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x80000000 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x3f`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x7fc00000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x3f`ffc00800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x6 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x30`00000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffffffc000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffffffc0000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RSN(0x00000309, 0x0000030b, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, 0, UINT64_C(0xffffff0000000000)),
+ MFX(0x00000345, "IA32_PERF_CAPABILITIES", Ia32PerfCapabilities, ReadOnly, 0x10c2, 0, 0), /* value=0x10c2 */
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, 0, UINT64_C(0xfffffffffffff444)), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0xffffffff`ffffffff */
+ MFO(0x00000390, "IA32_PERF_GLOBAL_OVF_CTRL", Ia32PerfGlobalOvfCtrl), /* value=0xffffffff`ffffffff */
+ MFX(0x000003f1, "IA32_PEBS_ENABLE", Ia32PebsEnable, Ia32PebsEnable, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFN(0x00000478, "CPUID1_FEATURE_MASK", IntelCpuId1FeatureMaskEcdx, IntelCpuId1FeatureMaskEcdx), /* value=0xffffffff`ffffffff */
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBase, ReadOnly, UINT64_C(0x5a08000000000d), 0, 0), /* value=0x5a0800`0000000d */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x3f00000016), 0, 0), /* value=0x3f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0xf7f9fffe0401e172), 0, 0), /* value=0xf7f9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3ffff00036dff), 0, 0), /* value=0x3ffff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x3fff000011ff), 0, 0), /* value=0x3fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ MFX(0x0000048b, "IA32_VMX_PROCBASED_CTLS2", Ia32VmxProcBasedCtls2, ReadOnly, UINT64_C(0x4100000000), 0, 0), /* value=0x41`00000000 */
+ MVX(0x000004f8, "C2_UNK_0000_04f8", 0, 0, 0),
+ MVX(0x000004f9, "C2_UNK_0000_04f9", 0, 0, 0),
+ MVX(0x000004fa, "C2_UNK_0000_04fa", 0, 0, 0),
+ MVX(0x000004fb, "C2_UNK_0000_04fb", 0, 0, 0),
+ MVX(0x000004fc, "C2_UNK_0000_04fc", 0, 0, 0),
+ MVX(0x000004fd, "C2_UNK_0000_04fd", 0, 0, 0),
+ MVX(0x000004fe, "C2_UNK_0000_04fe", 0, 0, 0),
+ MVX(0x000004ff, "C2_UNK_0000_04ff", 0, 0, 0),
+ MVX(0x00000590, "C2_UNK_0000_0590", 0, 0, 0),
+ MVX(0x00000591, "C2_UNK_0000_0591", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0x000005a0, "C2_PECI_CTL", IntelCore2PeciControl, IntelCore2PeciControl, 0, UINT64_C(0xfffffffffffffffe), 0), /* value=0x1 */
+ MVI(0x000005a1, "C2_UNK_0000_05a1", 0x1),
+ MFX(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xfffffffffffff2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x1b0008`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffff80`0d2ce6c0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0x0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x4700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x0 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffffff82`0dcfd000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7fff`7c7511e0 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Intel(R) Xeon(R) CPU X5482 @ 3.20GHz.
+ */
+static CPUMDBENTRY const g_Entry_Intel_Xeon_X5482_3_20GHz =
+{
+ /*.pszName = */ "Intel Xeon X5482 3.20GHz",
+ /*.pszFullName = */ "Intel(R) Xeon(R) CPU X5482 @ 3.20GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_INTEL,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 23,
+ /*.uStepping = */ 6,
+ /*.enmMicroarch = */ kCpumMicroarch_Intel_Core2_Penryn,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_400MHZ,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 38,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Intel_Xeon_X5482_3_20GHz)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_LAST_STD_LEAF,
+ /*.DefUnknownCpuId = */ { 0x07280202, 0x00000000, 0x00000000, 0x00000503 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Intel_Xeon_X5482_3_20GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Intel_Xeon_X5482_3_20GHz),
+};
+
+#endif /* !VBOX_DB_Intel_Xeon_X5482_3_20GHz */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/Makefile.kup b/src/VBox/VMM/VMMR3/cpus/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Makefile.kup
diff --git a/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h b/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h
new file mode 100644
index 00000000..04041a25
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/Quad_Core_AMD_Opteron_2384.h
@@ -0,0 +1,266 @@
+/* $Id: Quad_Core_AMD_Opteron_2384.h $ */
+/** @file
+ * CPU database entry "Quad-Core AMD Opteron 2384".
+ * Generated at 2013-12-09T21:56:56Z by VBoxCpuReport v4.3.51r91133 on win.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
+#define VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves captured from a Quad-Core AMD Opteron(tm) Processor 2384 by VBoxCpuReport (generated data -- do not hand-edit).
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000005, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x00100f42, 0x06040800, 0x00802009, 0x178bfbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00000000, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x8000001b, 0x68747541, 0x444d4163, 0x69746e65, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00100f42, 0x00000d4f, 0x000037ff, 0xefd3fbff, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x64617551, 0x726f432d, 0x4d412065, 0x704f2044, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x6f726574, 0x6d74286e, 0x72502029, 0x7365636f, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x20726f73, 0x34383332, 0x00000000, 0x00000000, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0xff30ff10, 0xff30ff20, 0x40020140, 0x40020140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x20800000, 0x42004200, 0x02008140, 0x0030b140, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000001f9, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003030, 0x00000000, 0x00002003, 0x00000000, 0 },
+ { 0x80000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000a, 0x00000000, 0x00000000, 0x00000001, 0x00000040, 0x00000000, 0x0000000f, 0 },
+ { 0x8000000b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000000f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000010, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000012, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000013, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000014, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000016, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000017, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000019, 0x00000000, 0x00000000, 0xf0300000, 0x60100000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001a, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x8000001b, 0x00000000, 0x00000000, 0x0000001f, 0x00000000, 0x00000000, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges captured from a Quad-Core AMD Opteron(tm) Processor 2384 (AMD family 10h) by VBoxCpuReport -- do not hand-edit.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_Quad_Core_AMD_Opteron_2384[] =
+{
+ MAL(0x00000000, "IA32_P5_MC_ADDR", 0x00000402),
+ MAL(0x00000001, "IA32_P5_MC_TYPE", 0x00000401),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0xbe`410ca9b6 */
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0, UINT64_C(0xffff0000000006ff)),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MVO(0x0000008b, "BBL_CR_D3|BIOS_SIGN", 0x1000086),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0x508, 0, 0), /* value=0x508 */
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0x106, 0, 0), /* value=0x106 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ MFX(0x0000017b, "IA32_MCG_CTL", Ia32McgCtl, Ia32McgCtl, 0, UINT64_C(0xffffffffffffffc0), 0), /* value=0x3f */
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, UINT64_C(0xffffffffffffff80), 0x40), /* value=0x0 */
+ MFO(0x000001db, "P6_LAST_BRANCH_FROM_IP", P6LastBranchFromIp), /* value=0xfffff800`0245dd94 */
+ MFO(0x000001dc, "P6_LAST_BRANCH_TO_IP", P6LastBranchToIp), /* value=0xfffff800`0245e910 */
+ MFO(0x000001dd, "P6_LAST_INT_FROM_IP", P6LastIntFromIp), /* value=0x753d3416 */
+ MFO(0x000001de, "P6_LAST_INT_TO_IP", P6LastIntToIp), /* value=0x753ea130 */
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xffff000000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xffff000000000ff8)), /* value=0x80000006 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`c0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xffff000000000ff8)), /* value=0xc0000006 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xffff0000000007ff)), /* value=0xffff`f8000800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xffff000000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RFN(0x00000400, 0x00000417, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0x4d01, 0xfe, UINT64_C(0xffffffffffff8200)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xfffff800`0245dd00 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xfffff800`0245da80 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x14700 */
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0xfffe0000 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xfffffa60`01b8a000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x7ff`fffde000 */
+ MFX(0xc0000103, "AMD64_TSC_AUX", Amd64TscAux, Amd64TscAux, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RSN(0xc0000408, 0xc000040a, "AMD_10H_MC4_MISCn", AmdFam10hMc4MiscN, AmdFam10hMc4MiscN, 0, UINT64_C(0xff00f000ffffffff), 0),
+ RVI(0xc000040b, 0xc000040f, "AMD_10H_MC4_MISCn", 0),
+ RSN(0xc0010000, 0xc0010003, "AMD_K8_PERF_CTL_n", AmdK8PerfCtlN, AmdK8PerfCtlN, 0x0, UINT64_C(0xfffffcf000200000), 0),
+ RSN(0xc0010004, 0xc0010007, "AMD_K8_PERF_CTR_n", AmdK8PerfCtrN, AmdK8PerfCtrN, 0x0, UINT64_C(0xffff000000000000), 0),
+ MFX(0xc0010010, "AMD_K8_SYS_CFG", AmdK8SysCfg, AmdK8SysCfg, 0x760600, UINT64_C(0xffffffffff80f8ff), 0), /* value=0x760600 */
+ MFX(0xc0010015, "AMD_K8_HW_CFG", AmdK8HwCr, AmdK8HwCr, 0x1000030, UINT64_C(0xffffffff00000020), 0), /* value=0x1000030 */
+ MFW(0xc0010016, "AMD_K8_IORR_BASE_0", AmdK8IorrBaseN, AmdK8IorrBaseN, UINT64_C(0xffff000000000fe7)), /* value=0x1`b8210000 */
+ MFW(0xc0010017, "AMD_K8_IORR_MASK_0", AmdK8IorrMaskN, AmdK8IorrMaskN, UINT64_C(0xffff0000000007ff)), /* value=0x0 */
+ MFX(0xc0010018, "AMD_K8_IORR_BASE_1", AmdK8IorrBaseN, AmdK8IorrBaseN, 0x1, UINT64_C(0xffff000000000fe7), 0), /* value=0x0 */
+ MFX(0xc0010019, "AMD_K8_IORR_MASK_1", AmdK8IorrMaskN, AmdK8IorrMaskN, 0x1, UINT64_C(0xffff0000000007ff), 0), /* value=0x0 */
+ MFW(0xc001001a, "AMD_K8_TOP_MEM", AmdK8TopOfMemN, AmdK8TopOfMemN, UINT64_C(0xffff0000007fffff)), /* value=0xc8000000 */
+ MFX(0xc001001d, "AMD_K8_TOP_MEM2", AmdK8TopOfMemN, AmdK8TopOfMemN, 0x1, UINT64_C(0xffff0000007fffff), 0), /* value=0x2`38000000 */
+ MFN(0xc001001f, "AMD_K8_NB_CFG1", AmdK8NbCfg1, AmdK8NbCfg1), /* value=0x400000`00000008 */
+ MFN(0xc0010020, "AMD_K8_PATCH_LOADER", WriteOnly, AmdK8PatchLoader),
+ MFX(0xc0010022, "AMD_K8_MC_XCPT_REDIR", AmdK8McXcptRedir, AmdK8McXcptRedir, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RFN(0xc0010030, 0xc0010035, "AMD_K8_CPU_NAME_n", AmdK8CpuNameN, AmdK8CpuNameN),
+ MFX(0xc001003e, "AMD_K8_HTC", AmdK8HwThermalCtrl, AmdK8HwThermalCtrl, 0x327f0004, UINT64_C(0xffffffffc0008838), 0), /* value=0x327f0004 */
+ MFX(0xc001003f, "AMD_K8_STC", AmdK8SwThermalCtrl, AmdK8SwThermalCtrl, 0, UINT64_C(0xffffffffc00088c0), 0), /* value=0x30000000 */
+ MVO(0xc0010043, "AMD_K8_THERMTRIP_STATUS", 0x1830),
+ MFX(0xc0010044, "AMD_K8_MC_CTL_MASK_0", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x0, UINT64_C(0xffffffffffffff00), 0), /* value=0x80 */
+ MFX(0xc0010045, "AMD_K8_MC_CTL_MASK_1", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x1, ~(uint64_t)UINT32_MAX, 0), /* value=0x80 */
+ MFX(0xc0010046, "AMD_K8_MC_CTL_MASK_2", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x2, UINT64_C(0xfffffffffffff000), 0), /* value=0x200 */
+ MFX(0xc0010047, "AMD_K8_MC_CTL_MASK_3", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x3, UINT64_C(0xfffffffffffffffc), 0), /* value=0x0 */
+ MFX(0xc0010048, "AMD_K8_MC_CTL_MASK_4", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x4, UINT64_C(0xffffffffc0000000), 0), /* value=0x780400 */
+ MFX(0xc0010049, "AMD_K8_MC_CTL_MASK_5", AmdK8McCtlMaskN, AmdK8McCtlMaskN, 0x5, UINT64_C(0xfffffffffffffffe), 0), /* value=0x0 */
+ RFN(0xc0010050, 0xc0010053, "AMD_K8_SMI_ON_IO_TRAP_n", AmdK8SmiOnIoTrapN, AmdK8SmiOnIoTrapN),
+ MFX(0xc0010054, "AMD_K8_SMI_ON_IO_TRAP_CTL_STS", AmdK8SmiOnIoTrapCtlSts, AmdK8SmiOnIoTrapCtlSts, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010055, "AMD_K8_INT_PENDING_MSG", AmdK8IntPendingMessage, AmdK8IntPendingMessage, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010056, "AMD_K8_SMI_TRIGGER_IO_CYCLE", AmdK8SmiTriggerIoCycle, AmdK8SmiTriggerIoCycle, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x200242e */
+ MVX(0xc0010057, "AMD_10H_UNK_c001_0057", 0, 0, 0),
+ MFX(0xc0010058, "AMD_10H_MMIO_CFG_BASE_ADDR", AmdFam10hMmioCfgBaseAddr, AmdFam10hMmioCfgBaseAddr, 0, UINT64_C(0xffff0000000fffc0), 0), /* value=0xe0000021 */
+ MFX(0xc0010059, "AMD_10H_TRAP_CTL?", AmdFam10hTrapCtlMaybe, AmdFam10hTrapCtlMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001005a, "AMD_10H_UNK_c001_005a", 0, 0, 0),
+ MVX(0xc001005b, "AMD_10H_UNK_c001_005b", 0, 0, 0),
+ MVX(0xc001005c, "AMD_10H_UNK_c001_005c", 0, 0, 0),
+ MVX(0xc001005d, "AMD_10H_UNK_c001_005d", 0, 0, 0),
+ MVO(0xc0010060, "AMD_K8_BIST_RESULT", 0),
+ MFX(0xc0010061, "AMD_10H_P_ST_CUR_LIM", AmdFam10hPStateCurLimit, ReadOnly, 0x30, 0, 0), /* value=0x30 */
+ MFX(0xc0010062, "AMD_10H_P_ST_CTL", AmdFam10hPStateControl, AmdFam10hPStateControl, 0x1, 0, UINT64_C(0xfffffffffffffff8)), /* value=0x1 */
+ MFX(0xc0010063, "AMD_10H_P_ST_STS", AmdFam10hPStateStatus, ReadOnly, 0x1, 0, 0), /* value=0x1 */
+ MFX(0xc0010064, "AMD_10H_P_ST_0", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001e13000300b), 0, 0), /* value=0x800001e1`3000300b */
+ MFX(0xc0010065, "AMD_10H_P_ST_1", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001c840004004), 0, 0), /* value=0x800001c8`40004004 */
+ MFX(0xc0010066, "AMD_10H_P_ST_2", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x800001b64000404e), 0, 0), /* value=0x800001b6`4000404e */
+ MFX(0xc0010067, "AMD_10H_P_ST_3", AmdFam10hPStateN, AmdFam10hPStateN, UINT64_C(0x8000019d40004040), 0, 0), /* value=0x8000019d`40004040 */
+ MFX(0xc0010068, "AMD_10H_P_ST_4", AmdFam10hPStateN, AmdFam10hPStateN, 0, 0, 0), /* value=0x0 */
+ MFX(0xc0010070, "AMD_10H_COFVID_CTL", AmdFam10hCofVidControl, AmdFam10hCofVidControl, 0x40014004, UINT64_C(0xffffffff01b80000), 0), /* value=0x40014004 */
+ MFX(0xc0010071, "AMD_10H_COFVID_STS", AmdFam10hCofVidStatus, AmdFam10hCofVidStatus, UINT64_C(0x38b600c340014004), UINT64_MAX, 0), /* value=0x38b600c3`40014004 */
+ MFX(0xc0010074, "AMD_10H_CPU_WD_TMR_CFG", AmdFam10hCpuWatchdogTimer, AmdFam10hCpuWatchdogTimer, 0, UINT64_C(0xffffffffffffff80), 0), /* value=0x0 */
+ MFX(0xc0010111, "AMD_K8_SMM_BASE", AmdK8SmmBase, AmdK8SmmBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x98e00 */
+ MFX(0xc0010112, "AMD_K8_SMM_ADDR", AmdK8SmmAddr, AmdK8SmmAddr, 0, UINT64_C(0xffff00000001ffff), 0), /* value=0x0 */
+ MFX(0xc0010113, "AMD_K8_SMM_MASK", AmdK8SmmMask, AmdK8SmmMask, 0, UINT64_C(0xffff0000000188c0), 0), /* value=0x1 */
+ MFX(0xc0010114, "AMD_K8_VM_CR", AmdK8VmCr, AmdK8VmCr, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xffffffe0)), /* value=0x8 */
+ MFX(0xc0010115, "AMD_K8_IGNNE", AmdK8IgnNe, AmdK8IgnNe, 0, ~(uint64_t)UINT32_MAX, UINT32_C(0xfffffffe)), /* value=0x0 */
+ MFX(0xc0010117, "AMD_K8_VM_HSAVE_PA", AmdK8VmHSavePa, AmdK8VmHSavePa, 0, 0, UINT64_C(0xffff000000000fff)), /* value=0x0 */
+ MFN(0xc0010118, "AMD_10H_VM_LOCK_KEY", AmdFam10hVmLockKey, AmdFam10hVmLockKey), /* value=0x0 */
+ MFN(0xc0010119, "AMD_10H_SSM_LOCK_KEY", AmdFam10hSmmLockKey, AmdFam10hSmmLockKey), /* value=0x0 */
+ MFX(0xc001011a, "AMD_10H_LOCAL_SMI_STS", AmdFam10hLocalSmiStatus, AmdFam10hLocalSmiStatus, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc0010140, "AMD_10H_OSVW_ID_LEN", AmdFam10hOsVisWrkIdLength, AmdFam10hOsVisWrkIdLength, 0x1, 0, 0), /* value=0x1 */
+ MFN(0xc0010141, "AMD_10H_OSVW_STS", AmdFam10hOsVisWrkStatus, AmdFam10hOsVisWrkStatus), /* value=0x0 */
+ MFX(0xc0011000, "AMD_K7_MCODE_CTL", AmdK7MicrocodeCtl, AmdK7MicrocodeCtl, 0, ~(uint64_t)UINT32_MAX, 0x204), /* value=0x0 */
+ MFX(0xc0011001, "AMD_K7_APIC_CLUSTER_ID", AmdK7ClusterIdMaybe, AmdK7ClusterIdMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFN(0xc0011004, "AMD_K8_CPUID_CTL_STD01", AmdK8CpuIdCtlStd01hEdcx, AmdK8CpuIdCtlStd01hEdcx), /* value=0x802009`178bfbff */
+ MFN(0xc0011005, "AMD_K8_CPUID_CTL_EXT01", AmdK8CpuIdCtlExt01hEdcx, AmdK8CpuIdCtlExt01hEdcx), /* value=0x37ff`efd3fbff */
+ MFX(0xc0011006, "AMD_K7_DEBUG_STS?", AmdK7DebugStatusMaybe, AmdK7DebugStatusMaybe, 0, UINT64_C(0xffffffff00000080), 0), /* value=0x0 */
+ MFN(0xc0011007, "AMD_K7_BH_TRACE_BASE?", AmdK7BHTraceBaseMaybe, AmdK7BHTraceBaseMaybe), /* value=0x0 */
+ MFN(0xc0011008, "AMD_K7_BH_TRACE_PTR?", AmdK7BHTracePtrMaybe, AmdK7BHTracePtrMaybe), /* value=0x0 */
+ MFN(0xc0011009, "AMD_K7_BH_TRACE_LIM?", AmdK7BHTraceLimitMaybe, AmdK7BHTraceLimitMaybe), /* value=0x0 */
+ MFX(0xc001100a, "AMD_K7_HDT_CFG?", AmdK7HardwareDebugToolCfgMaybe, AmdK7HardwareDebugToolCfgMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0xc001100b, "AMD_K7_FAST_FLUSH_COUNT?", AmdK7FastFlushCountMaybe, AmdK7FastFlushCountMaybe, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x7c0 */
+ MFX(0xc001100c, "AMD_K7_NODE_ID", AmdK7NodeId, AmdK7NodeId, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MVX(0xc001100d, "AMD_K8_LOGICAL_CPUS_NUM?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100e, "AMD_K8_WRMSR_BP?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001100f, "AMD_K8_WRMSR_BP_MASK?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc0011010, "AMD_K8_BH_TRACE_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc0011011, "AMD_K8_BH_TRACE_USRD?", 0), /* value=0x0 */
+ MVI(0xc0011012, "AMD_K7_UNK_c001_1012", UINT32_MAX),
+ MVI(0xc0011013, "AMD_K7_UNK_c001_1013", UINT64_MAX),
+ MVX(0xc0011014, "AMD_K8_XCPT_BP_RIP?", 0, 0, 0),
+ MVX(0xc0011015, "AMD_K8_XCPT_BP_RIP_MASK?", 0, 0, 0),
+ MVX(0xc0011016, "AMD_K8_COND_HDT_VAL?", 0, 0, 0),
+ MVX(0xc0011017, "AMD_K8_COND_HDT_VAL_MASK?", 0, 0, 0),
+ MVX(0xc0011018, "AMD_K8_XCPT_BP_CTL?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0xc001101d, "AMD_K8_NB_BIST?", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0xc001101e, "AMD_K8_THERMTRIP_2?", 0x1830), /* Villain? */
+ MVX(0xc001101f, "AMD_K8_NB_CFG?", UINT64_C(0x40000000000008), 0, 0),
+ MFX(0xc0011020, "AMD_K7_LS_CFG", AmdK7LoadStoreCfg, AmdK7LoadStoreCfg, 0, UINT64_C(0xfffe012000000000), 0), /* value=0x10`00001000 */
+ MFW(0xc0011021, "AMD_K7_IC_CFG", AmdK7InstrCacheCfg, AmdK7InstrCacheCfg, ~(uint64_t)UINT32_MAX), /* value=0x0 */
+ MFX(0xc0011022, "AMD_K7_DC_CFG", AmdK7DataCacheCfg, AmdK7DataCacheCfg, 0, UINT64_C(0xffc0000000000000), 0), /* value=0x1c94`49000000 */
+ MFN(0xc0011023, "AMD_K7_BU_CFG", AmdK7BusUnitCfg, AmdK7BusUnitCfg), /* Villain? value=0x10200020 */
+ MFX(0xc0011024, "AMD_K7_DEBUG_CTL_2?", AmdK7DebugCtl2Maybe, AmdK7DebugCtl2Maybe, 0, UINT64_C(0xffffffffffffff00), 0), /* value=0x0 */
+ MFN(0xc0011025, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMatchMaybe, AmdK7Dr0DataMatchMaybe), /* value=0x0 */
+ MFN(0xc0011026, "AMD_K7_DR0_DATA_MATCH?", AmdK7Dr0DataMaskMaybe, AmdK7Dr0DataMaskMaybe), /* value=0x0 */
+ MFX(0xc0011027, "AMD_K7_DR0_ADDR_MASK", AmdK7DrXAddrMaskN, AmdK7DrXAddrMaskN, 0x0, UINT64_C(0xfffffffffffff000), 0), /* value=0x0 */
+ MVX(0xc0011028, "AMD_10H_UNK_c001_1028", 0, UINT64_C(0xfffffffffffffff8), 0),
+ MVX(0xc0011029, "AMD_10H_UNK_c001_1029", 0, ~(uint64_t)UINT32_MAX, 0),
+ MFX(0xc001102a, "AMD_10H_BU_CFG2", AmdFam10hBusUnitCfg2, AmdFam10hBusUnitCfg2, 0, UINT64_C(0xfff00000c0000000), 0), /* value=0x40040`050000c0 */
+ MFX(0xc0011030, "AMD_10H_IBS_FETCH_CTL", AmdFam10hIbsFetchCtl, AmdFam10hIbsFetchCtl, 0, UINT64_C(0xfdfcffff00000000), 0), /* value=0x0 */
+ MFI(0xc0011031, "AMD_10H_IBS_FETCH_LIN_ADDR", AmdFam10hIbsFetchLinAddr), /* value=0xffffff1f`6ffffec0 */
+ MFI(0xc0011032, "AMD_10H_IBS_FETCH_PHYS_ADDR", AmdFam10hIbsFetchPhysAddr), /* value=0xffffbecf`eff1fec0 */
+ MFX(0xc0011033, "AMD_10H_IBS_OP_EXEC_CTL", AmdFam10hIbsOpExecCtl, AmdFam10hIbsOpExecCtl, 0, UINT64_C(0xfffffffffff00000), 0), /* value=0x0 */
+ MFN(0xc0011034, "AMD_10H_IBS_OP_RIP", AmdFam10hIbsOpRip, AmdFam10hIbsOpRip), /* value=0xffffcf06`409f2d93 */
+ MFI(0xc0011035, "AMD_10H_IBS_OP_DATA", AmdFam10hIbsOpData), /* value=0x3b`7701fe63 */
+ MFX(0xc0011036, "AMD_10H_IBS_OP_DATA2", AmdFam10hIbsOpData2, AmdFam10hIbsOpData2, 0, UINT64_C(0xffffffffffffffc8), 0), /* value=0x0 */
+ MFI(0xc0011037, "AMD_10H_IBS_OP_DATA3", AmdFam10hIbsOpData3), /* value=0x0 */
+ MFX(0xc0011038, "AMD_10H_IBS_DC_LIN_ADDR", AmdFam10hIbsDcLinAddr, AmdFam10hIbsDcLinAddr, 0, UINT64_C(0x7fffffffffff), 0), /* value=0x0 */
+ MFI(0xc0011039, "AMD_10H_IBS_DC_PHYS_ADDR", AmdFam10hIbsDcPhysAddr), /* value=0x0 */
+ MFO(0xc001103a, "AMD_10H_IBS_CTL", AmdFam10hIbsCtl), /* value=0x100 */
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for Quad-Core AMD Opteron(tm) Processor 2384 (AMD family 10h / K10, model 4, stepping 2; generated table, do not hand-edit).
+ */
+static CPUMDBENTRY const g_Entry_Quad_Core_AMD_Opteron_2384 =
+{
+ /*.pszName = */ "Quad-Core AMD Opteron 2384",
+ /*.pszFullName = */ "Quad-Core AMD Opteron(tm) Processor 2384",
+ /*.enmVendor = */ CPUMCPUVENDOR_AMD,
+ /*.uFamily = */ 16,
+ /*.uModel = */ 4,
+ /*.uStepping = */ 2,
+ /*.enmMicroarch = */ kCpumMicroarch_AMD_K10,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_UNKNOWN,
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 48,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_Quad_Core_AMD_Opteron_2384)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Quad_Core_AMD_Opteron_2384)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_Quad_Core_AMD_Opteron_2384),
+};
+
+#endif /* !VBOX_CPUDB_Quad_Core_AMD_Opteron_2384 */
+
diff --git a/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h b/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h
new file mode 100644
index 00000000..75d5e085
--- /dev/null
+++ b/src/VBox/VMM/VMMR3/cpus/VIA_QuadCore_L4700_1_2_GHz.h
@@ -0,0 +1,400 @@
+/* $Id: VIA_QuadCore_L4700_1_2_GHz.h $ */
+/** @file
+ * CPU database entry "VIA QuadCore L4700 1.2+ GHz".
+ * Generated at 2013-12-20T14:40:07Z by VBoxCpuReport v4.3.53r91411 on linux.amd64.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
+#define VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * CPUID leaves for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMCPUIDLEAF const g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz[] =
+{
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000a, 0x746e6543, 0x736c7561, 0x48727561, 0 },
+ { 0x00000001, 0x00000000, 0x00000000, 0x000006fd, 0x06080800, 0x008863a9, 0xbfc9fbff, 0 },
+ { 0x00000002, 0x00000000, 0x00000000, 0x02b3b001, 0x00000000, 0x00000000, 0x2c04307d, 0 },
+ { 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000004, 0x00000000, 0x00000000, 0x1c000021, 0x03c0003f, 0x0000003f, 0x00000000, 0 },
+ { 0x00000005, 0x00000000, 0x00000000, 0x00000040, 0x00000040, 0x00000003, 0x00022220, 0 },
+ { 0x00000006, 0x00000000, 0x00000000, 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x00000009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x0000000a, 0x00000000, 0x00000000, 0x06280202, 0x00000000, 0x00000000, 0x00000503, 0 },
+ { 0x80000000, 0x00000000, 0x00000000, 0x80000008, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x20100800, 0 },
+ { 0x80000002, 0x00000000, 0x00000000, 0x20202020, 0x20202020, 0x20202020, 0x20202020, 0 },
+ { 0x80000003, 0x00000000, 0x00000000, 0x49562020, 0x75512041, 0x6f436461, 0x4c206572, 0 },
+ { 0x80000004, 0x00000000, 0x00000000, 0x30303734, 0x31204020, 0x202b322e, 0x007a4847, 0 },
+ { 0x80000005, 0x00000000, 0x00000000, 0x00000000, 0x08800880, 0x40100140, 0x40100140, 0 },
+ { 0x80000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04008140, 0x00000000, 0 },
+ { 0x80000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0x80000008, 0x00000000, 0x00000000, 0x00003024, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000000, 0x00000000, 0x00000000, 0xc0000004, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000001, 0x00000000, 0x00000000, 0x000006fd, 0x00000000, 0x00000000, 0x1ec03dcc, 0 },
+ { 0xc0000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000003, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0 },
+ { 0xc0000004, 0x00000000, 0x00000000, 0x000fffb7, 0x08000955, 0x08530954, 0x00000000, 0 },
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+#ifndef CPUM_DB_STANDALONE
+/**
+ * MSR ranges for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMMSRRANGE const g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz[] =
+{
+ RVI(0x00000000, 0x00000005, "ZERO_0000_0000_THRU_0000_0005", 0),
+ MFX(0x00000006, "IA32_MONITOR_FILTER_LINE_SIZE", Ia32MonitorFilterLineSize, Ia32MonitorFilterLineSize, 0, UINT64_C(0xffffffffffff0000), 0), /* value=0x40 */
+ RVI(0x00000007, 0x0000000f, "ZERO_0000_0007_THRU_0000_000f", 0),
+ MFN(0x00000010, "IA32_TIME_STAMP_COUNTER", Ia32TimestampCounter, Ia32TimestampCounter), /* value=0x965`912e15ac */
+ RVI(0x00000011, 0x0000001a, "ZERO_0000_0011_THRU_0000_001a", 0),
+ MFX(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase, UINT32_C(0xfee00800), 0x600, UINT64_C(0xfffffff0000000ff)),
+ RVI(0x0000001c, 0x00000029, "ZERO_0000_001c_THRU_0000_0029", 0),
+ MFX(0x0000002a, "EBL_CR_POWERON", IntelEblCrPowerOn, IntelEblCrPowerOn, 0x2580000, UINT64_MAX, 0), /* value=0x2580000 */
+ RVI(0x0000002b, 0x00000039, "ZERO_0000_002b_THRU_0000_0039", 0),
+ MFO(0x0000003a, "IA32_FEATURE_CONTROL", Ia32FeatureControl), /* value=0x5 */
+ RVI(0x0000003b, 0x00000078, "ZERO_0000_003b_THRU_0000_0078", 0),
+ RVI(0x0000007a, 0x0000008a, "ZERO_0000_007a_THRU_0000_008a", 0),
+ MFN(0x0000008b, "BBL_CR_D3|BIOS_SIGN", Ia32BiosSignId, Ia32BiosSignId), /* value=0xc`00000000 */
+ RVI(0x0000008c, 0x0000009a, "ZERO_0000_008c_THRU_0000_009a", 0),
+ MFO(0x0000009b, "IA32_SMM_MONITOR_CTL", Ia32SmmMonitorCtl), /* value=0x0 */
+ RVI(0x0000009c, 0x000000c0, "ZERO_0000_009c_THRU_0000_00c0", 0),
+ RSN(0x000000c1, 0x000000c3, "IA32_PMCn", Ia32PmcN, Ia32PmcN, 0x0, UINT64_C(0xffffff0000000000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x000000c4, 0x000000cc, "ZERO_0000_00c4_THRU_0000_00cc", 0),
+ MFX(0x000000cd, "MSR_FSB_FREQ", IntelP6FsbFrequency, ReadOnly, 0, 0, 0),
+ RVI(0x000000ce, 0x000000e1, "ZERO_0000_00ce_THRU_0000_00e1", 0),
+ MFI(0x000000e2, "MSR_PKG_CST_CONFIG_CONTROL", IntelPkgCStConfigControl), /* value=0x6a204 */
+ MFX(0x000000e3, "C2_SMM_CST_MISC_INFO", IntelCore2SmmCStMiscInfo, IntelCore2SmmCStMiscInfo, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ MFX(0x000000e4, "MSR_PMG_IO_CAPTURE_BASE", IntelPmgIoCaptureBase, IntelPmgIoCaptureBase, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x0 */
+ RVI(0x000000e5, 0x000000e6, "ZERO_0000_00e5_THRU_0000_00e6", 0),
+ MFN(0x000000e7, "IA32_MPERF", Ia32MPerf, Ia32MPerf), /* value=0x2f4 */
+ MFN(0x000000e8, "IA32_APERF", Ia32APerf, Ia32APerf), /* value=0x2f2 */
+ RVI(0x000000e9, 0x000000fd, "ZERO_0000_00e9_THRU_0000_00fd", 0),
+ MFX(0x000000fe, "IA32_MTRRCAP", Ia32MtrrCap, ReadOnly, 0xd08, 0, 0), /* value=0xd08 */
+ RVI(0x000000ff, 0x0000011d, "ZERO_0000_00ff_THRU_0000_011d", 0),
+ MFX(0x0000011e, "BBL_CR_CTL3", IntelBblCrCtl3, IntelBblCrCtl3, 0, UINT64_MAX, 0), /* value=0x0 */
+ RVI(0x0000011f, 0x00000173, "ZERO_0000_011f_THRU_0000_0173", 0),
+ MFX(0x00000174, "IA32_SYSENTER_CS", Ia32SysEnterCs, Ia32SysEnterCs, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x10 */
+ MFX(0x00000175, "IA32_SYSENTER_ESP", Ia32SysEnterEsp, Ia32SysEnterEsp, 0, 0, UINT64_C(0xffff800000000000)), /* value=0x0 */
+ MFX(0x00000176, "IA32_SYSENTER_EIP", Ia32SysEnterEip, Ia32SysEnterEip, 0, 0, UINT64_C(0xffff800000000000)), /* value=0xffffffff`8166bfa0 */
+ RVI(0x00000177, 0x00000178, "ZERO_0000_0177_THRU_0000_0178", 0),
+ MFX(0x00000179, "IA32_MCG_CAP", Ia32McgCap, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFX(0x0000017a, "IA32_MCG_STATUS", Ia32McgStatus, Ia32McgStatus, 0, UINT64_C(0xfffffffffffffff8), 0), /* value=0x0 */
+ RVI(0x0000017b, 0x00000185, "ZERO_0000_017b_THRU_0000_0185", 0),
+ RSN(0x00000186, 0x00000188, "IA32_PERFEVTSELn", Ia32PerfEvtSelN, Ia32PerfEvtSelN, 0x0, UINT64_C(0xfffffffff8280000), 0), /* XXX: The range ended earlier than expected! */
+ RVI(0x00000189, 0x00000197, "ZERO_0000_0189_THRU_0000_0197", 0),
+ MFX(0x00000198, "IA32_PERF_STATUS", Ia32PerfStatus, Ia32PerfStatus, UINT64_C(0x853095408000955), UINT64_MAX, 0), /* value=0x8530954`08000955 */
+ MFX(0x00000199, "IA32_PERF_CTL", Ia32PerfCtl, Ia32PerfCtl, 0x954, 0, 0), /* Might bite. value=0x954 */
+ MFX(0x0000019a, "IA32_CLOCK_MODULATION", Ia32ClockModulation, Ia32ClockModulation, 0x2, UINT64_C(0xffffffffffffffe1), 0), /* value=0x2 */
+ MFX(0x0000019b, "IA32_THERM_INTERRUPT", Ia32ThermInterrupt, Ia32ThermInterrupt, 0, UINT64_C(0xffffffffff0000e0), 0), /* value=0x0 */
+ MFX(0x0000019c, "IA32_THERM_STATUS", Ia32ThermStatus, Ia32ThermStatus, 0x8320000, UINT64_MAX, 0), /* value=0x8320000 */
+ MFX(0x0000019d, "IA32_THERM2_CTL", Ia32Therm2Ctl, ReadOnly, 0x853, 0, 0), /* value=0x853 */
+ RVI(0x0000019e, 0x0000019f, "ZERO_0000_019e_THRU_0000_019f", 0),
+ MFX(0x000001a0, "IA32_MISC_ENABLE", Ia32MiscEnable, Ia32MiscEnable, 0x173c89, UINT64_C(0xffffffb87939c176), 0), /* value=0x173c89 */
+ RVI(0x000001a1, 0x000001d8, "ZERO_0000_01a1_THRU_0000_01d8", 0),
+ MFX(0x000001d9, "IA32_DEBUGCTL", Ia32DebugCtl, Ia32DebugCtl, 0, 0, UINT64_C(0xffffffffffffe03c)), /* value=0x1 */
+ RVI(0x000001da, 0x000001f1, "ZERO_0000_01da_THRU_0000_01f1", 0),
+ MFO(0x000001f2, "IA32_SMRR_PHYSBASE", Ia32SmrrPhysBase), /* value=0x0 */
+ MFO(0x000001f3, "IA32_SMRR_PHYSMASK", Ia32SmrrPhysMask), /* value=0x0 */
+ RVI(0x000001f4, 0x000001ff, "ZERO_0000_01f4_THRU_0000_01ff", 0),
+ MFX(0x00000200, "IA32_MTRR_PHYS_BASE0", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x0, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x6 */
+ MFX(0x00000201, "IA32_MTRR_PHYS_MASK0", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x0, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`80000800 */
+ MFX(0x00000202, "IA32_MTRR_PHYS_BASE1", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x1, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x70000000 */
+ MFX(0x00000203, "IA32_MTRR_PHYS_MASK1", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x1, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`f0000800 */
+ MFX(0x00000204, "IA32_MTRR_PHYS_BASE2", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x2, 0, UINT64_C(0xfffffff000000ff8)), /* value=0xd0000001 */
+ MFX(0x00000205, "IA32_MTRR_PHYS_MASK2", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x2, 0, UINT64_C(0xfffffff0000007ff)), /* value=0xf`ff800800 */
+ MFX(0x00000206, "IA32_MTRR_PHYS_BASE3", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x3, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000207, "IA32_MTRR_PHYS_MASK3", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x3, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x00000208, "IA32_MTRR_PHYS_BASE4", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x4, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x00000209, "IA32_MTRR_PHYS_MASK4", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x4, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020a, "IA32_MTRR_PHYS_BASE5", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x5, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020b, "IA32_MTRR_PHYS_MASK5", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x5, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020c, "IA32_MTRR_PHYS_BASE6", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x6, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020d, "IA32_MTRR_PHYS_MASK6", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x6, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ MFX(0x0000020e, "IA32_MTRR_PHYS_BASE7", Ia32MtrrPhysBaseN, Ia32MtrrPhysBaseN, 0x7, 0, UINT64_C(0xfffffff000000ff8)), /* value=0x0 */
+ MFX(0x0000020f, "IA32_MTRR_PHYS_MASK7", Ia32MtrrPhysMaskN, Ia32MtrrPhysMaskN, 0x7, 0, UINT64_C(0xfffffff0000007ff)), /* value=0x0 */
+ RVI(0x00000210, 0x0000024f, "ZERO_0000_0210_THRU_0000_024f", 0),
+ MFS(0x00000250, "IA32_MTRR_FIX64K_00000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix64K_00000),
+ RVI(0x00000251, 0x00000257, "ZERO_0000_0251_THRU_0000_0257", 0),
+ MFS(0x00000258, "IA32_MTRR_FIX16K_80000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_80000),
+ MFS(0x00000259, "IA32_MTRR_FIX16K_A0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix16K_A0000),
+ RVI(0x0000025a, 0x00000267, "ZERO_0000_025a_THRU_0000_0267", 0),
+ MFS(0x00000268, "IA32_MTRR_FIX4K_C0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C0000),
+ MFS(0x00000269, "IA32_MTRR_FIX4K_C8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_C8000),
+ MFS(0x0000026a, "IA32_MTRR_FIX4K_D0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D0000),
+ MFS(0x0000026b, "IA32_MTRR_FIX4K_D8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_D8000),
+ MFS(0x0000026c, "IA32_MTRR_FIX4K_E0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E0000),
+ MFS(0x0000026d, "IA32_MTRR_FIX4K_E8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_E8000),
+ MFS(0x0000026e, "IA32_MTRR_FIX4K_F0000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F0000),
+ MFS(0x0000026f, "IA32_MTRR_FIX4K_F8000", Ia32MtrrFixed, Ia32MtrrFixed, GuestMsrs.msr.MtrrFix4K_F8000),
+ RVI(0x00000270, 0x00000276, "ZERO_0000_0270_THRU_0000_0276", 0),
+ MFS(0x00000277, "IA32_PAT", Ia32Pat, Ia32Pat, Guest.msrPAT),
+ RVI(0x00000278, 0x000002fe, "ZERO_0000_0278_THRU_0000_02fe", 0),
+ MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, UINT64_C(0xfffffffffffff3f8)),
+ RVI(0x00000300, 0x00000308, "ZERO_0000_0300_THRU_0000_0308", 0),
+ RSN(0x00000309, 0x0000030a, "IA32_FIXED_CTRn", Ia32FixedCtrN, Ia32FixedCtrN, 0x0, UINT64_C(0xffffff0000000000), 0),
+ MFX(0x0000030b, "IA32_FIXED_CTR2", Ia32FixedCtrN, Ia32FixedCtrN, 0x2, UINT64_C(0xfffff8020a068061), 0), /* value=0x2d4 */
+ RVI(0x0000030c, 0x0000038c, "ZERO_0000_030c_THRU_0000_038c", 0),
+ MFX(0x0000038d, "IA32_FIXED_CTR_CTRL", Ia32FixedCtrCtrl, Ia32FixedCtrCtrl, 0, UINT64_C(0xfffffffffffff444), 0), /* value=0x0 */
+ MFX(0x0000038e, "IA32_PERF_GLOBAL_STATUS", Ia32PerfGlobalStatus, ReadOnly, 0, 0, 0), /* value=0x0 */
+ MFN(0x0000038f, "IA32_PERF_GLOBAL_CTRL", Ia32PerfGlobalCtrl, Ia32PerfGlobalCtrl), /* value=0xffffffff`ffffffff */
+ RVI(0x00000390, 0x0000047f, "ZERO_0000_0390_THRU_0000_047f", 0),
+ MFX(0x00000480, "IA32_VMX_BASIC", Ia32VmxBase, ReadOnly, UINT64_C(0x1a040000000007), 0, 0), /* value=0x1a0400`00000007 */
+ MFX(0x00000481, "IA32_VMX_PINBASED_CTLS", Ia32VmxPinbasedCtls, ReadOnly, UINT64_C(0x3f00000016), 0, 0), /* value=0x3f`00000016 */
+ MFX(0x00000482, "IA32_VMX_PROCBASED_CTLS", Ia32VmxProcbasedCtls, ReadOnly, UINT64_C(0x77f9fffe0401e172), 0, 0), /* value=0x77f9fffe`0401e172 */
+ MFX(0x00000483, "IA32_VMX_EXIT_CTLS", Ia32VmxExitCtls, ReadOnly, UINT64_C(0x3efff00036dff), 0, 0), /* value=0x3efff`00036dff */
+ MFX(0x00000484, "IA32_VMX_ENTRY_CTLS", Ia32VmxEntryCtls, ReadOnly, UINT64_C(0x1fff000011ff), 0, 0), /* value=0x1fff`000011ff */
+ MFX(0x00000485, "IA32_VMX_MISC", Ia32VmxMisc, ReadOnly, 0x403c0, 0, 0), /* value=0x403c0 */
+ MFX(0x00000486, "IA32_VMX_CR0_FIXED0", Ia32VmxCr0Fixed0, ReadOnly, UINT32_C(0x80000021), 0, 0), /* value=0x80000021 */
+ MFX(0x00000487, "IA32_VMX_CR0_FIXED1", Ia32VmxCr0Fixed1, ReadOnly, UINT32_MAX, 0, 0), /* value=0xffffffff */
+ MFX(0x00000488, "IA32_VMX_CR4_FIXED0", Ia32VmxCr4Fixed0, ReadOnly, 0x2000, 0, 0), /* value=0x2000 */
+ MFX(0x00000489, "IA32_VMX_CR4_FIXED1", Ia32VmxCr4Fixed1, ReadOnly, 0x27ff, 0, 0), /* value=0x27ff */
+ MFX(0x0000048a, "IA32_VMX_VMCS_ENUM", Ia32VmxVmcsEnum, ReadOnly, 0x2c, 0, 0), /* value=0x2c */
+ RVI(0x0000048b, 0x000005ff, "ZERO_0000_048b_THRU_0000_05ff", 0),
+ MFN(0x00000600, "IA32_DS_AREA", Ia32DsArea, Ia32DsArea), /* value=0x0 */
+ RVI(0x00000601, 0x00001106, "ZERO_0000_0601_THRU_0000_1106", 0),
+ MVI(0x00001107, "VIA_UNK_0000_1107", 0x2),
+ RVI(0x00001108, 0x0000110e, "ZERO_0000_1108_THRU_0000_110e", 0),
+ MVI(0x0000110f, "VIA_UNK_0000_110f", 0x2),
+ RVI(0x00001110, 0x00001152, "ZERO_0000_1110_THRU_0000_1152", 0),
+ MVO(0x00001153, "VIA_UNK_0000_1153", 0),
+ RVI(0x00001154, 0x000011ff, "ZERO_0000_1154_THRU_0000_11ff", 0),
+ MVX(0x00001200, "VIA_UNK_0000_1200", UINT64_C(0x8863a9bfc9fbff), 0x40000, 0),
+ MVX(0x00001201, "VIA_UNK_0000_1201", UINT64_C(0x120100800), UINT64_C(0xfffffff000000000), 0),
+ MVX(0x00001202, "VIA_UNK_0000_1202", 0x3dcc, UINT64_C(0xffffffffffffc233), 0),
+ MVX(0x00001203, "VIA_UNK_0000_1203", 0x18, 0, 0),
+ MVX(0x00001204, "VIA_UNK_0000_1204", UINT64_C(0x6fd00000424), 0, 0),
+ MVX(0x00001205, "VIA_UNK_0000_1205", UINT64_C(0x9890000000001), 0, 0),
+ MVX(0x00001206, "VIA_ALT_VENDOR_EBX", 0, 0, 0),
+ MVX(0x00001207, "VIA_ALT_VENDOR_ECDX", 0, 0, 0),
+ MVX(0x00001208, "VIA_UNK_0000_1208", 0, 0, 0),
+ MVX(0x00001209, "VIA_UNK_0000_1209", 0, 0, 0),
+ MVX(0x0000120a, "VIA_UNK_0000_120a", 0, 0, 0),
+ MVX(0x0000120b, "VIA_UNK_0000_120b", 0, 0, 0),
+ MVX(0x0000120c, "VIA_UNK_0000_120c", 0, 0, 0),
+ MVX(0x0000120d, "VIA_UNK_0000_120d", 0, 0, 0),
+ MVI(0x0000120e, "VIA_UNK_0000_120e", UINT64_C(0x820007b100002080)), /* Villain? */
+ MVX(0x0000120f, "VIA_UNK_0000_120f", UINT64_C(0x200000001a000000), 0x18000000, 0),
+ MVI(0x00001210, "ZERO_0000_1210", 0),
+ MVX(0x00001211, "VIA_UNK_0000_1211", 0, 0, 0),
+ MVX(0x00001212, "VIA_UNK_0000_1212", 0, 0, 0),
+ MVX(0x00001213, "VIA_UNK_0000_1213", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVO(0x00001214, "VIA_UNK_0000_1214", UINT64_C(0x5dd89e10ffffffff)),
+ RVI(0x00001215, 0x0000121f, "ZERO_0000_1215_THRU_0000_121f", 0),
+ MVO(0x00001220, "VIA_UNK_0000_1220", 0),
+ MVO(0x00001221, "VIA_UNK_0000_1221", 0x4dd2e713),
+ RVI(0x00001222, 0x0000122f, "ZERO_0000_1222_THRU_0000_122f", 0),
+ MVX(0x00001230, "VIA_UNK_0000_1230", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001231, "VIA_UNK_0000_1231", UINT64_C(0x7f9110bdc740), 0x200, 0),
+ MVO(0x00001232, "VIA_UNK_0000_1232", UINT64_C(0x2603448430479888)),
+ MVI(0x00001233, "VIA_UNK_0000_1233", UINT64_C(0xb39acda158793c27)), /* Villain? */
+ MVX(0x00001234, "VIA_UNK_0000_1234", 0, 0, 0),
+ MVX(0x00001235, "VIA_UNK_0000_1235", 0, 0, 0),
+ MVX(0x00001236, "VIA_UNK_0000_1236", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVX(0x00001237, "VIA_UNK_0000_1237", UINT32_C(0xffc00026), UINT64_C(0xffffffff06000001), 0),
+ MVO(0x00001238, "VIA_UNK_0000_1238", 0x2),
+ MVI(0x00001239, "VIA_UNK_0000_1239", 0), /* Villain? */
+ RVI(0x0000123a, 0x0000123f, "ZERO_0000_123a_THRU_0000_123f", 0),
+ MVO(0x00001240, "VIA_UNK_0000_1240", 0),
+ MVO(0x00001241, "VIA_UNK_0000_1241", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x00001242, "ZERO_0000_1242", 0),
+ MVX(0x00001243, "VIA_UNK_0000_1243", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001244, "ZERO_0000_1244", 0),
+ MVX(0x00001245, "VIA_UNK_0000_1245", UINT64_C(0x3020400000000064), UINT64_C(0xf000000000000000), 0),
+ MVX(0x00001246, "VIA_UNK_0000_1246", UINT64_C(0x10000000000), 0, 0),
+ MVX(0x00001247, "VIA_UNK_0000_1247", 0, 0, 0),
+ MVX(0x00001248, "VIA_UNK_0000_1248", 0, 0, 0),
+ MVI(0x00001249, "VIA_UNK_0000_1249", 0), /* Villain? */
+ MVI(0x0000124a, "VIA_UNK_0000_124a", 0), /* Villain? */
+ RVI(0x0000124b, 0x00001300, "ZERO_0000_124b_THRU_0000_1300", 0),
+ MVX(0x00001301, "VIA_UNK_0000_1301", 0, 0, 0),
+ MVX(0x00001302, "VIA_UNK_0000_1302", 0, 0, 0),
+ MVX(0x00001303, "VIA_UNK_0000_1303", 0, 0, 0),
+ MVX(0x00001304, "VIA_UNK_0000_1304", 0, 0, 0),
+ MVX(0x00001305, "VIA_UNK_0000_1305", 0, 0, 0),
+ MVX(0x00001306, "VIA_UNK_0000_1306", 0, 0, 0),
+ MVX(0x00001307, "VIA_UNK_0000_1307", 0, UINT64_C(0xffffff0000000000), 0),
+ MVX(0x00001308, "VIA_UNK_0000_1308", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001309, "VIA_UNK_0000_1309", 0, ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x0000130a, 0x0000130c, "ZERO_0000_130a_THRU_0000_130c", 0),
+ MVX(0x0000130d, "VIA_UNK_0000_130d", 0, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x0000130e, "VIA_UNK_0000_130e", UINT64_MAX, 0, 0),
+ RVI(0x0000130f, 0x00001311, "ZERO_0000_130f_THRU_0000_1311", 0),
+ MVX(0x00001312, "VIA_UNK_0000_1312", 0, 0, 0),
+ RVI(0x00001313, 0x00001314, "ZERO_0000_1313_THRU_0000_1314", 0),
+ MVX(0x00001315, "VIA_UNK_0000_1315", 0, 0, 0),
+ MVI(0x00001316, "ZERO_0000_1316", 0),
+ MVX(0x00001317, "VIA_UNK_0000_1317", 0, 0, 0),
+ MVX(0x00001318, "VIA_UNK_0000_1318", 0, 0, 0),
+ MVI(0x00001319, "ZERO_0000_1319", 0),
+ MVX(0x0000131a, "VIA_UNK_0000_131a", 0, 0, 0),
+ MVX(0x0000131b, "VIA_UNK_0000_131b", 0x3c20954, 0, 0),
+ RVI(0x0000131c, 0x00001401, "ZERO_0000_131c_THRU_0000_1401", 0),
+ MVO(0x00001402, "VIA_UNK_0000_1402", 0x148c48),
+ MVX(0x00001403, "VIA_UNK_0000_1403", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001404, "VIA_UNK_0000_1404", 0), /* Villain? */
+ MVI(0x00001405, "VIA_UNK_0000_1405", UINT32_C(0x80fffffc)), /* Villain? */
+ MVX(0x00001406, "VIA_UNK_0000_1406", UINT32_C(0xc842c800), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001407, "VIA_UNK_0000_1407", UINT32_C(0x880400c0), ~(uint64_t)UINT32_MAX, 0),
+ RVI(0x00001408, 0x0000140f, "ZERO_0000_1408_THRU_0000_140f", 0),
+ MVX(0x00001410, "VIA_UNK_0000_1410", 0xfa0, UINT64_C(0xfffffffffff00000), 0),
+ MVX(0x00001411, "VIA_UNK_0000_1411", 0xa5a, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x00001412, "VIA_UNK_0000_1412", 0x4090),
+ MVI(0x00001413, "VIA_UNK_0000_1413", 0), /* Villain? */
+ MVX(0x00001414, "VIA_UNK_0000_1414", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001415, "VIA_UNK_0000_1415", 0x5a, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001416, "VIA_UNK_0000_1416", 0x6e, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001417, "VIA_UNK_0000_1417", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001418, "VIA_UNK_0000_1418", 0xa, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001419, "VIA_UNK_0000_1419", 0x14, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141a, "VIA_UNK_0000_141a", 0x28, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141b, "VIA_UNK_0000_141b", 0x3c, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141c, "VIA_UNK_0000_141c", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141d, "VIA_UNK_0000_141d", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141e, "VIA_UNK_0000_141e", 0x69, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000141f, "VIA_UNK_0000_141f", 0x32, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x00001420, "VIA_UNK_0000_1420", 0x3, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001421, "VIA_UNK_0000_1421", 0x1f8, UINT64_C(0xfffffffffffc0000), 0),
+ MVX(0x00001422, "VIA_UNK_0000_1422", 0x1f4, UINT64_C(0xfffffffffffc0000), 0),
+ MVI(0x00001423, "VIA_UNK_0000_1423", 0xfffb7),
+ MVI(0x00001424, "VIA_UNK_0000_1424", 0x5b6),
+ MVI(0x00001425, "VIA_UNK_0000_1425", 0x65508),
+ MVI(0x00001426, "VIA_UNK_0000_1426", 0x843b),
+ MVX(0x00001427, "VIA_UNK_0000_1427", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001428, "VIA_UNK_0000_1428", 0x1ffffff, ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001429, "VIA_UNK_0000_1429", 0, UINT64_C(0xfffffffffff00000), 0),
+ MVI(0x0000142a, "VIA_UNK_0000_142a", 0x1c85d),
+ MVO(0x0000142b, "VIA_UNK_0000_142b", 0xf7e),
+ MVI(0x0000142c, "VIA_UNK_0000_142c", 0x20080), /* Villain? */
+ MVI(0x0000142d, "ZERO_0000_142d", 0),
+ MVI(0x0000142e, "VIA_UNK_0000_142e", 0x8000000), /* Villain? */
+ MVX(0x0000142f, "VIA_UNK_0000_142f", UINT64_C(0xffe57bea2ff3fdff), 0, 0),
+ RVI(0x00001430, 0x00001433, "ZERO_0000_1430_THRU_0000_1433", 0),
+ MVX(0x00001434, "VIA_UNK_0000_1434", 0x853f0e0, UINT64_C(0xffffffff7e7b0000), 0),
+ MVI(0x00001435, "VIA_UNK_0000_1435", 0x8000838), /* Villain? */
+ MVI(0x00001436, "VIA_UNK_0000_1436", 0x200004f), /* Villain? */
+ MVX(0x00001437, "VIA_UNK_0000_1437", 0, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x00001438, "VIA_UNK_0000_1438", 0x7004801c), /* Villain? */
+ MVI(0x00001439, "ZERO_0000_1439", 0),
+ MVX(0x0000143a, "VIA_UNK_0000_143a", 0x20000, ~(uint64_t)UINT32_MAX, 0),
+ MVI(0x0000143b, "ZERO_0000_143b", 0),
+ MVX(0x0000143c, "VIA_UNK_0000_143c", 0, UINT64_C(0xfffffffffffffe00), 0),
+ MVX(0x0000143d, "VIA_UNK_0000_143d", 0, UINT64_C(0xfffffffffffffe00), 0),
+ RVI(0x0000143e, 0x0000143f, "ZERO_0000_143e_THRU_0000_143f", 0),
+ MVX(0x00001440, "VIA_UNK_0000_1440", UINT32_C(0x80e00954), ~(uint64_t)UINT32_MAX, 0),
+ MVX(0x00001441, "VIA_UNK_0000_1441", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ MVX(0x00001442, "VIA_UNK_0000_1442", 0xf00954, UINT64_C(0xffffffff00ff7f7f), 0),
+ RVI(0x00001443, 0x00001448, "ZERO_0000_1443_THRU_0000_1448", 0),
+ MVI(0x00001449, "VIA_UNK_0000_1449", UINT64_C(0xfffff7e247)),
+ RVI(0x0000144a, 0x0000144f, "ZERO_0000_144a_THRU_0000_144f", 0),
+ MVX(0x00001450, "VIA_UNK_0000_1450", 0, UINT64_C(0xffffffffffffe000), 0),
+ MVX(0x00001451, "VIA_UNK_0000_1451", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001452, "VIA_UNK_0000_1452", 0, UINT64_C(0xffffffffff000000), 0),
+ MVI(0x00001453, "VIA_UNK_0000_1453", 0x3fffffff),
+ RVI(0x00001454, 0x0000145f, "ZERO_0000_1454_THRU_0000_145f", 0),
+ MVX(0x00001460, "VIA_UNK_0000_1460", 0, UINT64_C(0xffffffffffffffc0), 0),
+ MVX(0x00001461, "VIA_UNK_0000_1461", 0x7b, UINT64_C(0xffffffffffffff00), 0),
+ MVX(0x00001462, "VIA_UNK_0000_1462", 0x76, UINT64_C(0xffffffffffffff00), 0),
+ MVI(0x00001463, "VIA_UNK_0000_1463", 0x4a),
+ MVI(0x00001464, "ZERO_0000_1464", 0),
+ MVI(0x00001465, "VIA_UNK_0000_1465", 0xc6),
+ MVI(0x00001466, "VIA_UNK_0000_1466", UINT64_C(0x800000053)),
+ RVI(0x00001467, 0x0000146f, "ZERO_0000_1467_THRU_0000_146f", 0),
+ MVX(0x00001470, "VIA_UNK_0000_1470", UINT64_C(0x5dd89e10ffffffff), UINT32_C(0xfffffd68), 0),
+ MVI(0x00001471, "VIA_UNK_0000_1471", 0x2a000000),
+ RVI(0x00001472, 0x0000147f, "ZERO_0000_1472_THRU_0000_147f", 0),
+ MVI(0x00001480, "VIA_UNK_0000_1480", 0x3907),
+ MVI(0x00001481, "VIA_UNK_0000_1481", 0x12c0),
+ MVI(0x00001482, "VIA_UNK_0000_1482", 0x320),
+ MVI(0x00001483, "VIA_UNK_0000_1483", 0x3),
+ MVI(0x00001484, "VIA_UNK_0000_1484", 0x1647),
+ MVI(0x00001485, "VIA_UNK_0000_1485", 0x3b7),
+ MVI(0x00001486, "VIA_UNK_0000_1486", 0x443),
+ RVI(0x00001487, 0x0000148f, "ZERO_0000_1487_THRU_0000_148f", 0),
+ MVX(0x00001490, "VIA_UNK_0000_1490", 0xf5, UINT64_C(0xffffffffffffc000), 0),
+ MVX(0x00001491, "VIA_UNK_0000_1491", 0x200, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001492, "VIA_UNK_0000_1492", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001493, "VIA_UNK_0000_1493", 0x4, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001494, "VIA_UNK_0000_1494", 0x100, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001495, "VIA_UNK_0000_1495", 0x100, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001496, "VIA_UNK_0000_1496", 0x8, UINT64_C(0xffffffffffff0000), 0),
+ MVX(0x00001497, "VIA_UNK_0000_1497", 0, UINT64_C(0xffffffffff000000), 0),
+ MVX(0x00001498, "VIA_UNK_0000_1498", 0xffffff, UINT64_C(0xfffffffffffffe3c), 0),
+ MVI(0x00001499, "VIA_UNK_0000_1499", 0x2c5),
+ MVI(0x0000149a, "VIA_UNK_0000_149a", 0x1c1),
+ MVI(0x0000149b, "VIA_UNK_0000_149b", 0x2c5a),
+ MVI(0x0000149c, "VIA_UNK_0000_149c", 0x1c8f),
+ RVI(0x0000149d, 0x0000149e, "ZERO_0000_149d_THRU_0000_149e", 0),
+ MVI(0x0000149f, "VIA_UNK_0000_149f", 0x1c9),
+ RVI(0x000014a0, 0x00001522, "ZERO_0000_14a0_THRU_0000_1522", 0),
+ MFN(0x00001523, "VIA_UNK_0000_1523", WriteOnly, IgnoreWrite),
+ RVI(0x00001524, 0x00003179, "ZERO_0000_1524_THRU_0000_3179", 0),
+ MVO(0x0000317a, "VIA_UNK_0000_317a", UINT64_C(0x139f29749595b8)),
+ MVO(0x0000317b, "VIA_UNK_0000_317b", UINT64_C(0x5dd89e10ffffffff)),
+ MVI(0x0000317c, "ZERO_0000_317c", 0),
+ MFN(0x0000317d, "VIA_UNK_0000_317d", WriteOnly, IgnoreWrite),
+ MFN(0x0000317e, "VIA_UNK_0000_317e", WriteOnly, IgnoreWrite),
+ MVI(0x0000317f, "VIA_UNK_0000_317f", 0), /* Villain? */
+ RVI(0x00003180, 0x00003fff, "ZERO_0000_3180_THRU_0000_3fff", 0),
+ RVI(0x40000000, 0x40003fff, "ZERO_4000_0000_THRU_4000_3fff", 0),
+ RVI(0x80000000, 0x80000197, "ZERO_8000_0000_THRU_8000_0197", 0),
+ RVI(0x80000199, 0x80003fff, "ZERO_8000_0199_THRU_8000_3fff", 0),
+ RVI(0xc0000000, 0xc000007f, "ZERO_c000_0000_THRU_c000_007f", 0),
+ MFX(0xc0000080, "AMD64_EFER", Amd64Efer, Amd64Efer, 0xd01, 0x400, UINT64_C(0xffffffffffffd2fe)),
+ MFN(0xc0000081, "AMD64_STAR", Amd64SyscallTarget, Amd64SyscallTarget), /* value=0x230010`00000000 */
+ MFN(0xc0000082, "AMD64_STAR64", Amd64LongSyscallTarget, Amd64LongSyscallTarget), /* value=0xffffffff`81669af0 */
+ MFN(0xc0000083, "AMD64_STARCOMPAT", Amd64CompSyscallTarget, Amd64CompSyscallTarget), /* value=0xffffffff`8166c1d0 */
+ MFX(0xc0000084, "AMD64_SYSCALL_FLAG_MASK", Amd64SyscallFlagMask, Amd64SyscallFlagMask, 0, ~(uint64_t)UINT32_MAX, 0), /* value=0x3700 */
+ RVI(0xc0000085, 0xc00000ff, "ZERO_c000_0085_THRU_c000_00ff", 0),
+ MFN(0xc0000100, "AMD64_FS_BASE", Amd64FsBase, Amd64FsBase), /* value=0x7f91`10bdc740 */
+ MFN(0xc0000101, "AMD64_GS_BASE", Amd64GsBase, Amd64GsBase), /* value=0xffff8800`6fd80000 */
+ MFN(0xc0000102, "AMD64_KERNEL_GS_BASE", Amd64KernelGsBase, Amd64KernelGsBase), /* value=0x0 */
+ RVI(0xc0000104, 0xc0003fff, "ZERO_c000_0104_THRU_c000_3fff", 0),
+};
+#endif /* !CPUM_DB_STANDALONE */
+
+
+/**
+ * Database entry for VIA QuadCore L4700 @ 1.2+ GHz.
+ */
+static CPUMDBENTRY const g_Entry_VIA_QuadCore_L4700_1_2_GHz =
+{
+ /*.pszName = */ "VIA QuadCore L4700 1.2+ GHz",
+ /*.pszFullName = */ "VIA QuadCore L4700 @ 1.2+ GHz",
+ /*.enmVendor = */ CPUMCPUVENDOR_VIA,
+ /*.uFamily = */ 6,
+ /*.uModel = */ 15,
+ /*.uStepping = */ 13,
+ /*.enmMicroarch = */ kCpumMicroarch_VIA_Isaiah,
+ /*.uScalableBusFreq = */ CPUM_SBUSFREQ_267MHZ, /*??*/
+ /*.fFlags = */ 0,
+ /*.cMaxPhysAddrWidth= */ 36,
+ /*.paCpuIdLeaves = */ NULL_ALONE(g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz),
+ /*.cCpuIdLeaves = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_VIA_QuadCore_L4700_1_2_GHz)),
+ /*.enmUnknownCpuId = */ CPUMUKNOWNCPUID_DEFAULTS,
+ /*.DefUnknownCpuId = */ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ /*.fMsrMask = */ UINT32_MAX,
+ /*.cMsrRanges = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz)),
+ /*.paMsrRanges = */ NULL_ALONE(g_aMsrRanges_VIA_QuadCore_L4700_1_2_GHz),
+};
+
+#endif /* !VBOX_CPUDB_VIA_QuadCore_L4700_1_2_GHz */
+