Diffstat (limited to 'src/VBox/VMM/VMMR3/TM.cpp')
-rw-r--r--  src/VBox/VMM/VMMR3/TM.cpp  137
1 file changed, 91 insertions(+), 46 deletions(-)
diff --git a/src/VBox/VMM/VMMR3/TM.cpp b/src/VBox/VMM/VMMR3/TM.cpp
index 49390dd7..4391ea4d 100644
--- a/src/VBox/VMM/VMMR3/TM.cpp
+++ b/src/VBox/VMM/VMMR3/TM.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -124,6 +124,7 @@
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP from sup.h */
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/mm.h>
+#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
@@ -134,6 +135,7 @@
#include <VBox/vmm/iom.h>
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
@@ -170,7 +172,7 @@ static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion
static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
static void tmR3TimerQueueRunVirtualSync(PVM pVM);
-static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent);
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent);
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
static DECLCALLBACK(void) tmR3CpuLoadTimer(PVM pVM, PTMTIMER pTimer, void *pvUser);
#endif
@@ -344,6 +346,13 @@ VMM_INT_DECL(int) TMR3Init(PVM pVM)
pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC(pVM);
else
pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
+ /** @todo needs a better fix, for now disable offsetted mode for VMs
+ * with more than one VCPU. With the current TSC handling (frequent
+ * switching between offsetted mode and taking VM exits, on all VCPUs
+ * without any kind of coordination) it will lead to inconsistent TSC
+ * behavior with guest SMP, including TSC going backwards. */
+ if (pVM->cCpus != 1)
+ pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
}
/** @cfgm{TM/TSCTicksPerSecond, uint32_t, Current TSC frequency from GIP}
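The @todo above describes a concrete hazard: each VCPU independently flips between reporting the raw host TSC plus a private offset and trapping RDTSC to deliver a virtualized value, and nothing keeps those per-VCPU views in step. A toy sketch (illustrative only, not VirtualBox code) of how that produces a backwards-running guest TSC:

```c
#include <stdint.h>
#include <stdbool.h>

/* Toy model: a VCPU is either in offsetted mode (guest TSC = host TSC plus a
 * private offset) or in trapped mode (guest TSC = a lagging virtual TSC).
 * With uncoordinated per-VCPU offsets, a guest thread that reads the TSC on
 * one VCPU and then on another -- or on one VCPU across a mode switch -- can
 * observe a smaller value the second time. */
typedef struct TOYVCPU
{
    bool     fOffsetted;    /* Mode chosen per VCPU, without coordination. */
    uint64_t offTsc;        /* Private offset used in offsetted mode.      */
} TOYVCPU;

static uint64_t toyGuestTsc(const TOYVCPU *pVCpu, uint64_t uHostTsc, uint64_t uVirtTsc)
{
    return pVCpu->fOffsetted ? uHostTsc + pVCpu->offTsc : uVirtTsc;
}
```

With VCPU0 offsetted (offTsc = 0) and VCPU1 trapped while uVirtTsc lags uHostTsc, consecutive reads on VCPU0 and then VCPU1 go backwards, which is exactly what the cCpus != 1 guard avoids by keeping SMP VMs off the offsetted path.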
@@ -445,7 +454,7 @@ VMM_INT_DECL(int) TMR3Init(PVM pVM)
/** @cfgm{TM/CatchUpPrecentage[0..9], uint32_t, %, 1, 2000, various}
* The catch-up percent for a given period. */
- /** @cfgm{TM/CatchUpStartThreshold[0..9], uint64_t, ns, 0, UINT64_MAX,
+ /** @cfgm{TM/CatchUpStartThreshold[0..9], uint64_t, ns, 0, UINT64_MAX}
* The catch-up period threshold, or if you like, when a period starts. */
#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
do \
@@ -930,21 +939,24 @@ VMM_INT_DECL(int) TMR3InitFinalize(PVM pVM)
/*
* Resolve symbols.
*/
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
- AssertRCReturn(rc, rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
- AssertRCReturn(rc, rc);
- if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else
- AssertFatalFailed();
- AssertRCReturn(rc, rc);
+ if (!HMIsEnabled(pVM))
+ {
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
+ AssertRCReturn(rc, rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
+ AssertRCReturn(rc, rc);
+ if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else
+ AssertFatalFailed();
+ AssertRCReturn(rc, rc);
+ }
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataR0.pfnBad);
AssertRCReturn(rc, rc);
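The ladder above (and its twin in TMR3Relocate below) implements one pattern: the raw-mode worker must be the same algorithm already selected for ring-3, so the chosen R3 function pointer is matched to a symbol name which is then resolved in the RC module. A table-driven sketch of the same selection, assuming the PFNTIMENANOTSINTERNAL worker type from iprt/time.h:

```c
/* Illustrative equivalent of the if/else chain, not the actual TM.cpp code. */
static const struct
{
    PFNTIMENANOTSINTERNAL   pfnR3;      /* Worker selected for ring-3.     */
    const char             *pszSymbol;  /* Name of the matching RC symbol. */
} s_aNanoTSWorkers[] =
{
    { RTTimeNanoTSLFenceSync,   "RTTimeNanoTSLFenceSync"  },
    { RTTimeNanoTSLFenceAsync,  "RTTimeNanoTSLFenceAsync" },
    { RTTimeNanoTSLegacySync,   "RTTimeNanoTSLegacySync"  },
    { RTTimeNanoTSLegacyAsync,  "RTTimeNanoTSLegacyAsync" },
};

int rc = VERR_SYMBOL_NOT_FOUND;
for (unsigned i = 0; i < RT_ELEMENTS(s_aNanoTSWorkers); i++)
    if (pVM->tm.s.pfnVirtualGetRawR3 == s_aNanoTSWorkers[i].pfnR3)
    {
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, s_aNanoTSWorkers[i].pszSymbol,
                                 &pVM->tm.s.pfnVirtualGetRawRC);
        break;
    }
AssertRCReturn(rc, rc);
```

The new HMIsEnabled() guard skips all of this because a hardware-virtualization VM never executes the raw-mode context, so the RC symbols need not resolve.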
@@ -990,28 +1002,31 @@ VMM_INT_DECL(void) TMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
LogFlow(("TMR3Relocate\n"));
NOREF(offDelta);
- pVM->tm.s.pvGIPRC = MMHyperR3ToRC(pVM, pVM->tm.s.pvGIPR3);
- pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pVM->tm.s.paTimerQueuesR3);
pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pVM->tm.s.paTimerQueuesR3);
- pVM->tm.s.VirtualGetRawDataRC.pu64Prev = MMHyperR3ToRC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
- AssertFatal(pVM->tm.s.VirtualGetRawDataRC.pu64Prev);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
- AssertFatalRC(rc);
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
- AssertFatalRC(rc);
-
- if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
- else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
- rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
- else
- AssertFatalFailed();
- AssertFatalRC(rc);
+ if (!HMIsEnabled(pVM))
+ {
+ pVM->tm.s.pvGIPRC = MMHyperR3ToRC(pVM, pVM->tm.s.pvGIPR3);
+ pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pVM->tm.s.paTimerQueuesR3);
+ pVM->tm.s.VirtualGetRawDataRC.pu64Prev = MMHyperR3ToRC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
+ AssertFatal(pVM->tm.s.VirtualGetRawDataRC.pu64Prev);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataRC.pfnBad);
+ AssertFatalRC(rc);
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataRC.pfnRediscover);
+ AssertFatalRC(rc);
+
+ if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
+ rc = PDMR3LdrGetSymbolRC(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawRC);
+ else
+ AssertFatalFailed();
+ AssertFatalRC(rc);
+ }
/*
* Iterate the timers updating the pVMRC pointers.
@@ -1656,6 +1671,7 @@ VMMR3DECL(int) TMR3TimerDestroy(PTMTIMER pTimer)
STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
Assert(pQueue->offSchedule);
tmTimerQueueSchedule(pVM, pQueue);
+ STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
}
/*
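For reference, the added STAM_PROFILE_STOP balances the START in the context above: START records a start timestamp keyed by the local sample token ('a' here), and only the matching STOP accumulates the elapsed time into the statistics, so an unmatched START measures nothing for this path. The pairing idiom, assembled from the lines in this hunk:

```c
STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
tmTimerQueueSchedule(pVM, pQueue);   /* the work being timed */
STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
```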
@@ -1875,17 +1891,17 @@ static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint6
AssertCompile(TMCLOCK_MAX == 4);
#ifdef DEBUG_Sander /* very annoying, keep it private. */
- if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+ if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
Log(("tmR3TimerCallback: timer event still pending!!\n"));
#endif
- if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+ if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
&& ( pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
|| pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offSchedule
|| tmR3AnyExpiredTimers(pVM)
)
- && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+ && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
&& !pVM->tm.s.fRunningQueues
)
{
@@ -2617,8 +2633,20 @@ VMMR3DECL(int) TMR3TimerSetCritSect(PTMTIMERR3 pTimer, PPDMCRITSECT pCritSect)
*/
VMMR3_INT_DECL(PRTTIMESPEC) TMR3UtcNow(PVM pVM, PRTTIMESPEC pTime)
{
+ /* Get a stable set of VirtualSync parameters before querying UTC. */
+ uint64_t offVirtualSync;
+ uint64_t offVirtualSyncGivenUp;
+ do
+ {
+ offVirtualSync = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+ offVirtualSyncGivenUp = ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp);
+ } while (ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) != offVirtualSync);
+
+ Assert(offVirtualSync >= offVirtualSyncGivenUp);
+ uint64_t const offLag = offVirtualSync - offVirtualSyncGivenUp;
+
RTTimeNow(pTime);
- RTTimeSpecSubNano(pTime, ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) - ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp));
+ RTTimeSpecSubNano(pTime, offLag);
RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
return pTime;
}
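The rework above replaces two independent volatile reads with a consistent snapshot: offVirtualSync is read, then offVirtualSyncGivenUp, then offVirtualSync again, retrying until the first value is stable across the pair. A generic sketch of the idiom, with a hypothetical helper name:

```c
#include <iprt/asm.h>
#include <iprt/assert.h>

/* tmSketchReadLag is illustrative, not a TM.cpp function.  It samples two
 * related volatile 64-bit values and retries until the first is unchanged
 * across both reads -- the same stability check the new TMR3UtcNow uses --
 * so the computed lag never pairs values from two different updates of the
 * first field. */
static uint64_t tmSketchReadLag(uint64_t volatile *pu64Off,
                                uint64_t volatile *pu64GivenUp)
{
    uint64_t u64Off, u64GivenUp;
    do
    {
        u64Off     = ASMAtomicReadU64(pu64Off);
        u64GivenUp = ASMAtomicReadU64(pu64GivenUp);
    } while (ASMAtomicReadU64(pu64Off) != u64Off);
    Assert(u64Off >= u64GivenUp);
    return u64Off - u64GivenUp;
}
```

Without the retry, a writer updating both offsets between the two reads could pair a fresh offVirtualSync with a stale offVirtualSyncGivenUp (or vice versa), skewing or even underflowing the lag subtracted from the UTC result.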
@@ -2718,9 +2746,9 @@ VMMR3DECL(int) TMR3NotifyResume(PVM pVM, PVMCPU pVCpu)
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param u32Percent The new percentage. 100 means normal operation.
*/
-VMMDECL(int) TMR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
+VMMDECL(int) TMR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
{
- return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)tmR3SetWarpDrive, 2, pVM, u32Percent);
+ return VMR3ReqPriorityCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)tmR3SetWarpDrive, 2, pUVM, u32Percent);
}
@@ -2728,12 +2756,14 @@ VMMDECL(int) TMR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
* EMT worker for TMR3SetWarpDrive.
*
* @returns VBox status code.
- * @param pVM Pointer to the VM.
+ * @param pUVM The user mode VM handle.
* @param u32Percent See TMR3SetWarpDrive().
* @internal
*/
-static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
+static DECLCALLBACK(int) tmR3SetWarpDrive(PUVM pUVM, uint32_t u32Percent)
{
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
PVMCPU pVCpu = VMMGetCpu(pVM);
/*
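Though the hunk is cut off here, the prologue it adds is the standard one for the reworked PUVM-based entry points: resolve the shared VM structure from the user mode handle and validate it before any dereference, since the user mode structure can outlive pVM during VM destruction. A sketch with a hypothetical worker name:

```c
/* someTmWorker is illustrative only.  Every EMT worker handed a PUVM should
 * re-derive and validate pVM before touching it. */
static DECLCALLBACK(int) someTmWorker(PUVM pUVM, uint32_t uArg)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PVMCPU pVCpu = VMMGetCpu(pVM);  /* the EMT this priority request runs on */
    NOREF(pVCpu); NOREF(uArg);
    /* ... actual work on pVM ... */
    return VINF_SUCCESS;
}
```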
@@ -2768,6 +2798,21 @@ static DECLCALLBACK(int) tmR3SetWarpDrive(PVM pVM, uint32_t u32Percent)
/**
+ * Gets the current warp drive percent.
+ *
+ * @returns The warp drive percent.
+ * @param pUVM The user mode VM handle.
+ */
+VMMR3DECL(uint32_t) TMR3GetWarpDrive(PUVM pUVM)
+{
+ UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
+ PVM pVM = pUVM->pVM;
+ VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
+ return pVM->tm.s.u32VirtualWarpDrivePercentage;
+}
+
+
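A hypothetical usage sketch of the reworked warp drive API pair, assuming a valid pUVM: 100 means normal speed, and TMR3GetWarpDrive returns UINT32_MAX on a bad handle.

```c
/* Run the guest clock at double speed, then restore the previous setting. */
uint32_t const uOldPct = TMR3GetWarpDrive(pUVM);
int rc = TMR3SetWarpDrive(pUVM, 200);
if (RT_SUCCESS(rc) && uOldPct != UINT32_MAX)
{
    /* ... exercise the time-warped guest ... */
    rc = TMR3SetWarpDrive(pUVM, uOldPct);
}
```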
+/**
* Gets the performance information for one virtual CPU as seen by the VMM.
*
* The returned times covers the period where the VM is running and will be