author     Lorry Tar Creator <lorry-tar-importer@baserock.org>  2014-03-26 19:21:20 +0000
committer  <>  2014-05-08 15:03:54 +0000
commit     fb123f93f9f5ce42c8e5785d2f8e0edaf951740e (patch)
tree       c2103d76aec5f1f10892cd1d3a38e24f665ae5db /src/VBox/VMM/VMMRZ
parent     58ed4748338f9466599adfc8a9171280ed99e23f (diff)
Imported from /home/lorry/working-area/delta_VirtualBox/VirtualBox-4.3.10.tar.bz2. (HEAD, VirtualBox-4.3.10, master)
Diffstat (limited to 'src/VBox/VMM/VMMRZ')
-rw-r--r--  src/VBox/VMM/VMMRZ/DBGFRZ.cpp       34
-rw-r--r--  src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp   6
-rw-r--r--  src/VBox/VMM/VMMRZ/VMMRZ.cpp        80
3 files changed, 97 insertions, 23 deletions
diff --git a/src/VBox/VMM/VMMRZ/DBGFRZ.cpp b/src/VBox/VMM/VMMRZ/DBGFRZ.cpp
index c9fc4019..ba666d7b 100644
--- a/src/VBox/VMM/VMMRZ/DBGFRZ.cpp
+++ b/src/VBox/VMM/VMMRZ/DBGFRZ.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2006-2009 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -37,12 +37,13 @@
* VINF_SUCCESS means we completely handled this trap,
* other codes are passed execution to host context.
*
- * @param pVM Pointer to the VM.
- * @param pVCpu Pointer to the VMCPU.
- * @param pRegFrame Pointer to the register frame for the trap.
- * @param uDr6 The DR6 register value.
+ * @param pVM Pointer to the VM.
+ * @param pVCpu Pointer to the VMCPU.
+ * @param pRegFrame Pointer to the register frame for the trap.
+ * @param uDr6 The DR6 hypervisor register value.
+ * @param fAltStepping Alternative stepping indicator.
*/
-VMMRZDECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6)
+VMMRZ_INT_DECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6, bool fAltStepping)
{
#ifdef IN_RC
const bool fInHyper = !(pRegFrame->ss.Sel & X86_SEL_RPL) && !pRegFrame->eflags.Bits.u1VM;
@@ -77,24 +78,25 @@ VMMRZDECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame
* Are we single stepping or is it the guest?
*/
if ( (uDr6 & X86_DR6_BS)
- && (fInHyper || pVCpu->dbgf.s.fSingleSteppingRaw))
+ && (fInHyper || pVCpu->dbgf.s.fSingleSteppingRaw || fAltStepping))
{
pVCpu->dbgf.s.fSingleSteppingRaw = false;
LogFlow(("DBGFRZTrap01Handler: single step at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
return fInHyper ? VINF_EM_DBG_HYPER_STEPPED : VINF_EM_DBG_STEPPED;
}
-#ifdef IN_RC
/*
- * Currently we only implement single stepping in the guest,
- * so we'll bitch if this is not a BS event.
+ * Either an ICEBP in hypervisor code or a guest related debug exception
+ * of sorts.
*/
- AssertMsg(uDr6 & X86_DR6_BS, ("hey! we're not doing guest BPs yet! dr6=%RTreg %04x:%RGv\n",
- uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
-#endif
+ if (RT_UNLIKELY(fInHyper))
+ {
+ LogFlow(("DBGFRZTrap01Handler: unabled bp at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
+ return VERR_DBGF_HYPER_DB_XCPT;
+ }
- LogFlow(("DBGFRZTrap01Handler: guest debug event %RTreg at %04x:%RGv!\n", uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
- return fInHyper ? VERR_DBGF_HYPER_DB_XCPT : VINF_EM_RAW_GUEST_TRAP;
+ LogFlow(("DBGFRZTrap01Handler: guest debug event %#x at %04x:%RGv!\n", (uint32_t)uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
+ return VINF_EM_RAW_GUEST_TRAP;
}
@@ -109,7 +111,7 @@ VMMRZDECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame
* @param pVCpu Pointer to the VMCPU.
* @param pRegFrame Pointer to the register frame for the trap.
*/
-VMMRZDECL(int) DBGFRZTrap03Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
+VMMRZ_INT_DECL(int) DBGFRZTrap03Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
#ifdef IN_RC
const bool fInHyper = !(pRegFrame->ss.Sel & X86_SEL_RPL) && !pRegFrame->eflags.Bits.u1VM;
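
The widened DBGFRZTrap01Handler signature above means every ring-0/raw-mode trap dispatcher now has to supply the extra fAltStepping flag. A minimal sketch of such a call site, assuming a hypothetical dispatcher and a caller-tracked stepping flag (only the handler signature and the status codes come from this diff):

/* Hypothetical #DB dispatcher; the function and local names are illustrative only. */
static int exampleDispatchTrap01(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6)
{
    bool const fAltStepping = false; /* assumption: set when the VMM itself requested single stepping */
    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6, fAltStepping);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /* Reflect the #DB back to the guest. */
    }
    else if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_HYPER_STEPPED)
    {
        /* Report the completed step to the ring-3 debugger. */
    }
    return rc; /* VERR_DBGF_HYPER_DB_XCPT and other codes are passed up as-is. */
}
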
diff --git a/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp b/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
index 8e43064d..eccc1f53 100644
--- a/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
+++ b/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2008-2011 Oracle Corporation
+ * Copyright (C) 2008-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -481,7 +481,7 @@ VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
/*
* Do we need the cache? Skip the last bit if we don't.
*/
- if (!VMMIsHwVirtExtForced(pVM))
+ if (!HMIsEnabled(pVM))
return VINF_SUCCESS;
/*
@@ -1841,7 +1841,7 @@ VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu)
/**
* Starts or migrates the autoset of a virtual CPU.
*
- * This is used by HWACCMR0Enter. When we've longjumped out of the HWACCM
+ * This is used by HMR0Enter. When we've longjumped out of the HM
* execution loop with the set open, we'll migrate it when re-entering. While
* under normal circumstances, we'll start it so VMXR0LoadGuestState can access
* guest memory.
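
In PGMRZDynMap.cpp the cache setup in PGMR0DynMapInitVM is now gated on HMIsEnabled() instead of the retired VMMIsHwVirtExtForced(), and the auto-set doc comment follows the HWACCM-to-HM rename. As a reminder of how the auto set is used, a rough sketch of the ring-0 bracket around guest execution follows; PGMRZDynMapStartAutoSet comes from the hunk header above, while the release call and the surrounding function are assumptions for illustration:

/* Illustrative only: bracket the dynamic-mapping auto set around a ring-0 run. */
static int exampleR0RunGuest(PVM pVM, PVMCPU pVCpu)
{
    if (!HMIsEnabled(pVM))              /* same gate as PGMR0DynMapInitVM uses above */
        return VINF_SUCCESS;

    PGMRZDynMapStartAutoSet(pVCpu);     /* open the auto set for this EMT */
    /* ... map guest pages, execute guest code ... */
    PGMRZDynMapReleaseAutoSet(pVCpu);   /* assumption: the matching release routine */
    return VINF_SUCCESS;
}
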
diff --git a/src/VBox/VMM/VMMRZ/VMMRZ.cpp b/src/VBox/VMM/VMMRZ/VMMRZ.cpp
index 9a86cfb6..e78f41fb 100644
--- a/src/VBox/VMM/VMMRZ/VMMRZ.cpp
+++ b/src/VBox/VMM/VMMRZ/VMMRZ.cpp
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2009 Oracle Corporation
+ * Copyright (C) 2009-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -25,6 +25,7 @@
#include <VBox/err.h>
#include <iprt/assert.h>
+#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
@@ -52,6 +53,7 @@ VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation,
if (RT_UNLIKELY( pVCpu->vmm.s.cCallRing3Disabled != 0
&& enmOperation != VMMCALLRING3_VM_R0_ASSERTION))
{
+#ifndef IN_RING0
/*
* In most cases, it's sufficient to return a status code which
* will then be propagated up the code usually encountering several
@@ -64,6 +66,7 @@ VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation,
*/
if (enmOperation != VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS)
return VERR_VMM_RING3_CALL_DISABLED;
+#endif
#ifdef IN_RC
RTStrPrintf(g_szRTAssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
"VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x\n", enmOperation, uArg, pVCpu->idCpu);
@@ -83,7 +86,14 @@ VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation,
#ifdef IN_RC
pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST);
#else
- int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
+ int rc;
+ if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
+ {
+ rc = pVCpu->vmm.s.pfnCallRing3CallbackR0(pVCpu, enmOperation, pVCpu->vmm.s.pvCallRing3CallbackUserR0);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+ rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
if (RT_FAILURE(rc))
return rc;
#endif
@@ -118,8 +128,12 @@ VMMRZDECL(int) VMMRZCallRing3NoCpu(PVM pVM, VMMCALLRING3 enmOperation, uint64_t
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
+#endif
+
Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
- if (++pVCpu->vmm.s.cCallRing3Disabled == 1)
+ if (ASMAtomicIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1) /** @todo replace with unordered variant (ASMAtomicUoIncU32). */
{
/** @todo it might make more sense to just disable logging here, then we
* won't flush away important bits... but that goes both ways really. */
@@ -132,6 +146,10 @@ VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
# endif
#endif
}
+
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ ASMSetFlags(fFlags);
+#endif
}
@@ -144,8 +162,12 @@ VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
{
VMCPU_ASSERT_EMT(pVCpu);
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
+#endif
+
Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
- if (--pVCpu->vmm.s.cCallRing3Disabled == 0)
+ if (ASMAtomicDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0) /** @todo replace with unordered variant (ASMAtomicUoDecU32). */
{
#ifdef IN_RC
pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
@@ -156,6 +178,10 @@ VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
# endif
#endif
}
+
+#if defined(LOG_ENABLED) && defined(IN_RING0)
+ ASMSetFlags(fFlags);
+#endif
}
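
VMMRZCallRing3Disable/Enable now maintain cCallRing3Disabled with atomic operations, and ring-0 builds with logging enabled briefly disable interrupts around the update so the logger flush flags stay consistent under preemption. Nothing changes for callers; the usual bracket still looks like this (the body between the calls is a placeholder):

/* Sketch of the standard bracket around code that must not trigger a ring-3 call. */
VMMRZCallRing3Disable(pVCpu);
Assert(!VMMRZCallRing3IsEnabled(pVCpu));
/* ... critical section: no VMMRZCallRing3() in here ... */
VMMRZCallRing3Enable(pVCpu);
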
@@ -172,3 +198,49 @@ VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu)
return pVCpu->vmm.s.cCallRing3Disabled == 0;
}
+
+/**
+ * Sets the ring-0 callback before doing the ring-3 call.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @param pfnCallback Pointer to the callback.
+ * @param pvUser The user argument.
+ *
+ * @return VBox status code.
+ */
+VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPU pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser)
+{
+ AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+
+ if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
+ return VERR_ALREADY_EXISTS;
+
+ pVCpu->vmm.s.pfnCallRing3CallbackR0 = pfnCallback;
+ pVCpu->vmm.s.pvCallRing3CallbackUserR0 = pvUser;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Removes the ring-0 callback.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ */
+VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPU pVCpu)
+{
+ pVCpu->vmm.s.pfnCallRing3CallbackR0 = NULL;
+}
+
+
+/**
+ * Checks whether there is a ring-0 callback notification active.
+ *
+ * @param pVCpu Pointer to the VMCPU.
+ * @returns true if the notification is active, false otherwise.
+ */
+VMMRZDECL(bool) VMMRZCallRing3IsNotificationSet(PVMCPU pVCpu)
+{
+ return pVCpu->vmm.s.pfnCallRing3CallbackR0 != NULL;
+}
+
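
The three new functions above make up the ring-0 notification mechanism that VMMRZCallRing3 consults before long-jumping back to ring-3 (see the pfnCallRing3CallbackR0 invocation earlier in this file). A minimal usage sketch follows; the callback body, the helper names and the chosen VMMCALLRING3 operation are illustrative, and the callback signature is assumed to mirror the invocation shown in VMMRZCallRing3:

/* Hypothetical ring-0 code registering a notification around a ring-3 call. */
static DECLCALLBACK(int) exampleRing3Notification(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
{
    NOREF(pVCpu); NOREF(enmOperation); NOREF(pvUser);
    /* Undo or save any ring-0 state that must not survive the long jump. */
    return VINF_SUCCESS;
}

static int exampleDoRing3Call(PVM pVM, PVMCPU pVCpu)
{
    int rc = VMMRZCallRing3SetNotification(pVCpu, exampleRing3Notification, NULL /*pvUser*/);
    AssertRCReturn(rc, rc);

    rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0 /*uArg*/);

    VMMRZCallRing3RemoveNotification(pVCpu);
    return rc;
}
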