summaryrefslogtreecommitdiff
path: root/src/VBox/VMM/VMMAll/PGMAllPool.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/VBox/VMM/VMMAll/PGMAllPool.cpp')
-rw-r--r--src/VBox/VMM/VMMAll/PGMAllPool.cpp199
1 files changed, 100 insertions, 99 deletions
diff --git a/src/VBox/VMM/VMMAll/PGMAllPool.cpp b/src/VBox/VMM/VMMAll/PGMAllPool.cpp
index 952e8b59..30fbd014 100644
--- a/src/VBox/VMM/VMMAll/PGMAllPool.cpp
+++ b/src/VBox/VMM/VMMAll/PGMAllPool.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -31,7 +31,7 @@
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/disopcode.h>
-#include <VBox/vmm/hwacc_vmx.h>
+#include <VBox/vmm/hm_vmx.h>
#include <VBox/log.h>
#include <VBox/err.h>
@@ -46,13 +46,14 @@
RT_C_DECLS_BEGIN
DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind);
DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind);
+static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
#ifndef IN_RING3
DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
#endif
-#ifdef LOG_ENABLED
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *pgmPoolPoolKindToStr(uint8_t enmKind);
#endif
#if 0 /*defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)*/
@@ -423,7 +424,7 @@ void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPag
}
#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
if ( uShw.pPD->a[iShw].n.u1Present
- && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
+ && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
{
LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
# ifdef IN_RC /* TLB load - we're pushing things a bit... */
@@ -741,7 +742,7 @@ DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pReg
{
#ifndef IN_RC
/** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
- if ( HWACCMHasPendingIrq(pVM)
+ if ( HMHasPendingIrq(pVM)
&& (pRegFrame->rsp - pvFault) < 32)
{
/* Fault caused by stack writes while trying to inject an interrupt event. */
@@ -755,7 +756,7 @@ DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pReg
LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg));
/* Non-supervisor mode write means it's used for something else. */
- if (CPUMGetGuestCPL(pVCpu) != 0)
+ if (CPUMGetGuestCPL(pVCpu) == 3)
return true;
switch (pDis->pCurInstr->uOpcode)
@@ -842,7 +843,7 @@ static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGM
else if (rc2 == VINF_EM_RESCHEDULE)
{
if (rc == VINF_SUCCESS)
- rc = rc2;
+ rc = VBOXSTRICTRC_VAL(rc2);
# ifndef IN_RING3
VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
# endif
@@ -977,11 +978,21 @@ DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool
* Clear all the pages. ASSUMES that pvFault is readable.
*/
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
- uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
- pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->Param1));
+ uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
+#endif
+
+ uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1);
+ if (cbWrite <= 8)
+ pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, cbWrite);
+ else
+ {
+ Assert(cbWrite <= 16);
+ pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, 8);
+ pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + 8, pvFault + 8, cbWrite - 8);
+ }
+
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-#else
- pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->Param1));
#endif
/*
@@ -1070,7 +1081,7 @@ DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
if (pPage->fDirty)
{
- Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH));
+ Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH));
pgmUnlock(pVM);
return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */
}
@@ -1786,7 +1797,11 @@ void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)
}
Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aDirtyPages) || pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx == NIL_PGMPOOL_IDX);
- return;
+
+ /*
+ * Clear all references to this shadow table. See @bugref{7298}.
+ */
+ pgmPoolTrackClearPageUsers(pPool, pPage);
}
# endif /* !IN_RING3 */
@@ -2013,7 +2028,7 @@ static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser)
for (unsigned iLoop = 0; ; iLoop++)
{
uint16_t iToFree = pPool->iAgeTail;
- if (iToFree == iUser)
+ if (iToFree == iUser && iUser != NIL_PGMPOOL_IDX)
iToFree = pPool->aPages[iToFree].iAgePrev;
/* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls.
if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX)
@@ -2172,8 +2187,10 @@ static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2)
* @param enmKind The kind of mapping.
* @param enmAccess Access type for the mapping (only relevant for big pages)
* @param fA20Enabled Whether the CPU has the A20 gate enabled.
- * @param iUser The shadow page pool index of the user table.
- * @param iUserTable The index into the user table (shadowed).
+ * @param iUser The shadow page pool index of the user table. This is
+ * NIL_PGMPOOL_IDX for root pages.
+ * @param iUserTable The index into the user table (shadowed). Ignored if
+ *                      root page.
* @param ppPage Where to store the pointer to the page.
*/
static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
@@ -2201,7 +2218,9 @@ static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKin
*/
pgmPoolCacheUsed(pPool, pPage);
- int rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
+ int rc = VINF_SUCCESS;
+ if (iUser != NIL_PGMPOOL_IDX)
+ rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
if (RT_SUCCESS(rc))
{
Assert((PGMPOOLKIND)pPage->enmKind == enmKind);
@@ -2490,7 +2509,7 @@ static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
* the heap size should suffice. */
AssertFatalMsgRC(rc, ("PGMHandlerPhysicalRegisterEx %RGp failed with %Rrc\n", GCPhysPage, rc));
PVMCPU pVCpu = VMMGetCpu(pVM);
- AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
+ AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
}
pPage->fMonitored = true;
return rc;
@@ -2587,7 +2606,7 @@ static int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
AssertFatalRC(rc);
PVMCPU pVCpu = VMMGetCpu(pVM);
- AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3),
+ AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3),
("%#x %#x\n", pVCpu->pgm.s.fSyncFlags, pVM->fGlobalForcedActions));
}
pPage->fMonitored = false;
@@ -2804,43 +2823,49 @@ DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS
LogFlow(("pgmPoolTrackInsert GCPhys=%RGp iUser=%d iUserTable=%x\n", GCPhys, iUser, iUserTable));
-#ifdef VBOX_STRICT
- /*
- * Check that the entry doesn't already exists.
- */
- if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
+ if (iUser != NIL_PGMPOOL_IDX)
{
- uint16_t i = pPage->iUserHead;
- do
+#ifdef VBOX_STRICT
+ /*
+ * Check that the entry doesn't already exists.
+ */
+ if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
{
- Assert(i < pPool->cMaxUsers);
- AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
- i = paUsers[i].iNext;
- } while (i != NIL_PGMPOOL_USER_INDEX);
- }
+ uint16_t i = pPage->iUserHead;
+ do
+ {
+ Assert(i < pPool->cMaxUsers);
+ AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
+ i = paUsers[i].iNext;
+ } while (i != NIL_PGMPOOL_USER_INDEX);
+ }
#endif
- /*
- * Find free a user node.
- */
- uint16_t i = pPool->iUserFreeHead;
- if (i == NIL_PGMPOOL_USER_INDEX)
- {
- rc = pgmPoolTrackFreeOneUser(pPool, iUser);
- if (RT_FAILURE(rc))
- return rc;
- i = pPool->iUserFreeHead;
+ /*
+         * Find a free user node.
+ */
+ uint16_t i = pPool->iUserFreeHead;
+ if (i == NIL_PGMPOOL_USER_INDEX)
+ {
+ rc = pgmPoolTrackFreeOneUser(pPool, iUser);
+ if (RT_FAILURE(rc))
+ return rc;
+ i = pPool->iUserFreeHead;
+ }
+
+ /*
+ * Unlink the user node from the free list,
+ * initialize and insert it into the user list.
+ */
+ pPool->iUserFreeHead = paUsers[i].iNext;
+ paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX;
+ paUsers[i].iUser = iUser;
+ paUsers[i].iUserTable = iUserTable;
+ pPage->iUserHead = i;
}
+ else
+ pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
- /*
- * Unlink the user node from the free list,
- * initialize and insert it into the user list.
- */
- pPool->iUserFreeHead = paUsers[i].iNext;
- paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX;
- paUsers[i].iUser = iUser;
- paUsers[i].iUserTable = iUserTable;
- pPage->iUserHead = i;
/*
* Insert into cache and enable monitoring of the guest page if enabled.
@@ -2880,9 +2905,9 @@ DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS
*/
static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
{
+ Log3(("pgmPoolTrackAddUser: GCPhys=%RGp iUser=%%x iUserTable=%x\n", pPage->GCPhys, iUser, iUserTable));
PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
-
- Log3(("pgmPoolTrackAddUser GCPhys = %RGp iUser %x iUserTable %x\n", pPage->GCPhys, iUser, iUserTable));
+ Assert(iUser != NIL_PGMPOOL_IDX);
# ifdef VBOX_STRICT
/*
@@ -2895,8 +2920,8 @@ static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUse
do
{
Assert(i < pPool->cMaxUsers);
- AssertMsg(iUser != PGMPOOL_IDX_PD || iUser != PGMPOOL_IDX_PDPT || iUser != PGMPOOL_IDX_NESTED_ROOT || iUser != PGMPOOL_IDX_AMD64_CR3 ||
- paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
+ /** @todo this assertion looks odd... Shouldn't it be && here? */
+ AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
i = paUsers[i].iNext;
} while (i != NIL_PGMPOOL_USER_INDEX);
}
@@ -2946,15 +2971,19 @@ static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUse
* @param HCPhys The HC physical address of the shadow page.
* @param iUser The shadow page pool index of the user table.
* @param iUserTable The index into the user table (shadowed).
+ *
+ * @remarks Don't call this for root pages.
*/
static void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
{
+ Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable));
+ PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
+ Assert(iUser != NIL_PGMPOOL_IDX);
+
/*
* Unlink and free the specified user entry.
*/
- PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
- Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable));
/* Special: For PAE and 32-bit paging, there is usually no more than one user. */
uint16_t i = pPage->iUserHead;
if ( i != NIL_PGMPOOL_USER_INDEX
@@ -4898,7 +4927,9 @@ int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush)
* @param pPool The pool.
* @param HCPhys The HC physical address of the shadow page.
* @param iUser The shadow page pool index of the user table.
- * @param iUserTable The index into the user table (shadowed).
+ * NIL_PGMPOOL_IDX for root pages.
+ * @param iUserTable The index into the user table (shadowed). Ignored if
+ * root page.
*/
void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
{
@@ -4910,7 +4941,8 @@ void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint3
AssertReturnVoid(pPage->idx >= PGMPOOL_IDX_FIRST); /* paranoia (#6349) */
pgmLock(pVM);
- pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
+ if (iUser != NIL_PGMPOOL_IDX)
+ pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
if (!pPage->fCached)
pgmPoolFlushPage(pPool, pPage);
pgmUnlock(pVM);
@@ -4932,7 +4964,7 @@ void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint3
static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
{
PVM pVM = pPool->CTX_SUFF(pVM);
- LogFlow(("pgmPoolMakeMoreFreePages: iUser=%d\n", iUser));
+ LogFlow(("pgmPoolMakeMoreFreePages: enmKind=%d iUser=%d\n", enmKind, iUser));
NOREF(enmKind);
/*
@@ -4984,8 +5016,10 @@ static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_
* @param enmKind The kind of mapping.
* @param enmAccess Access type for the mapping (only relevant for big pages)
* @param fA20Enabled Whether the A20 gate is enabled or not.
- * @param iUser The shadow page pool index of the user table.
- * @param iUserTable The index into the user table (shadowed).
+ * @param iUser The shadow page pool index of the user table. Root
+ * pages should pass NIL_PGMPOOL_IDX.
+ * @param iUserTable The index into the user table (shadowed). Ignored for
+ * root pages (iUser == NIL_PGMPOOL_IDX).
* @param fLockPage Lock the page
* @param ppPage Where to store the pointer to the page. NULL is stored here on failure.
*/
@@ -5118,7 +5152,9 @@ int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS en
* @param pVM Pointer to the VM.
* @param HCPhys The HC physical address of the shadow page.
* @param iUser The shadow page pool index of the user table.
- * @param iUserTable The index into the user table (shadowed).
+ * NIL_PGMPOOL_IDX if root page.
+ * @param iUserTable The index into the user table (shadowed). Ignored if
+ * root page.
*/
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
{
@@ -5406,41 +5442,6 @@ void pgmR3PoolReset(PVM pVM)
/*
* Reinsert active pages into the hash and ensure monitoring chains are correct.
*/
- for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++)
- {
- PPGMPOOLPAGE pPage = &pPool->aPages[i];
-
- /** @todo r=bird: Is this code still needed in any way? The special root
- * pages should not be monitored or anything these days AFAIK. */
- Assert(pPage->iNext == NIL_PGMPOOL_IDX);
- Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX);
- Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
- Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX);
- Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
- Assert(!pPage->fMonitored);
-
- pPage->iNext = NIL_PGMPOOL_IDX;
- pPage->iModifiedNext = NIL_PGMPOOL_IDX;
- pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
- pPage->cModifications = 0;
- /* ASSUMES that we're not sharing with any of the other special pages (safe for now). */
- pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
- pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
- if (pPage->fMonitored)
- {
- int rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK,
- pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
- pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
- pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
- pPool->pszAccessHandler);
- AssertFatalRCSuccess(rc);
- pgmPoolHashInsert(pPool, pPage);
- }
- Assert(pPage->iUserHead == NIL_PGMPOOL_USER_INDEX); /* for now */
- Assert(pPage->iAgeNext == NIL_PGMPOOL_IDX);
- Assert(pPage->iAgePrev == NIL_PGMPOOL_IDX);
- }
-
for (VMCPUID i = 0; i < pVM->cCpus; i++)
{
/*
@@ -5457,7 +5458,7 @@ void pgmR3PoolReset(PVM pVM)
#endif /* IN_RING3 */
-#ifdef LOG_ENABLED
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
* Stringifies a PGMPOOLKIND value.
*/
@@ -5528,5 +5529,5 @@ static const char *pgmPoolPoolKindToStr(uint8_t enmKind)
}
return "Unknown kind!";
}
-#endif /* LOG_ENABLED*/
+#endif /* LOG_ENABLED || VBOX_STRICT */