Diffstat (limited to 'lib/xray/xray_interface.cc')
 lib/xray/xray_interface.cc | 229 ++++++++++++++++++++++++++++++---------
 1 file changed, 172 insertions(+), 57 deletions(-)
diff --git a/lib/xray/xray_interface.cc b/lib/xray/xray_interface.cc
index 39cf8efb3..7ad6a9b84 100644
--- a/lib/xray/xray_interface.cc
+++ b/lib/xray/xray_interface.cc
@@ -15,7 +15,6 @@
#include "xray_interface_internal.h"
-#include <atomic>
#include <cstdint>
#include <cstdio>
#include <errno.h>
@@ -46,10 +45,13 @@ static const int16_t cSledLength = 8;
#endif /* CPU architecture */
// This is the function to call when we encounter the entry or exit sleds.
-std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction{nullptr};
+__sanitizer::atomic_uintptr_t XRayPatchedFunction{0};
// This is the function to call from the arg1-enabled sleds/trampolines.
-std::atomic<void (*)(int32_t, XRayEntryType, uint64_t)> XRayArgLogger{nullptr};
+__sanitizer::atomic_uintptr_t XRayArgLogger{0};
+
+// This is the function to call when we encounter a custom event log call.
+__sanitizer::atomic_uintptr_t XRayPatchedCustomEvent{0};
// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo
// any successful mprotect(...) changes. This is used to make a page writeable
@@ -88,23 +90,45 @@ public:
} // namespace __xray
-extern std::atomic<bool> XRayInitialized;
-extern std::atomic<__xray::XRaySledMap> XRayInstrMap;
+extern __sanitizer::SpinMutex XRayInstrMapMutex;
+extern __sanitizer::atomic_uint8_t XRayInitialized;
+extern __xray::XRaySledMap XRayInstrMap;
int __xray_set_handler(void (*entry)(int32_t,
XRayEntryType)) XRAY_NEVER_INSTRUMENT {
- if (XRayInitialized.load(std::memory_order_acquire)) {
- __xray::XRayPatchedFunction.store(entry, std::memory_order_release);
+ if (__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire)) {
+
+ __sanitizer::atomic_store(&__xray::XRayPatchedFunction,
+ reinterpret_cast<uintptr_t>(entry),
+ __sanitizer::memory_order_release);
+ return 1;
+ }
+ return 0;
+}
+
+int __xray_set_customevent_handler(void (*entry)(void *, size_t))
+ XRAY_NEVER_INSTRUMENT {
+ if (__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire)) {
+ __sanitizer::atomic_store(&__xray::XRayPatchedCustomEvent,
+ reinterpret_cast<uintptr_t>(entry),
+ __sanitizer::memory_order_release);
return 1;
}
return 0;
}
+
int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
return __xray_set_handler(nullptr);
}
-std::atomic<bool> XRayPatching{false};
+int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
+ return __xray_set_customevent_handler(nullptr);
+}
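// --- Editor's sketch (not part of this patch): installing the handlers
// registered through the entry points above. Assumes a program built with
// -fxray-instrument against this runtime; the entry points are the public
// XRay API declared in xray/xray_interface.h.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include "xray/xray_interface.h"

void EntryExitHandler(int32_t FuncId, XRayEntryType Type) {
  // Handlers run inside every patched sled, so keep them cheap and
  // reentrancy-safe. (printf here is for illustration only.)
  std::printf("xray: func=%d type=%d\n", FuncId, static_cast<int>(Type));
}

void CustomEventHandler(void *Event, std::size_t Size) {
  // Receives the buffer handed to the __xray_customevent(...) intrinsic.
  std::fwrite(Event, 1, Size, stderr);
}

int main() {
  // Both setters return 0 until the runtime flips XRayInitialized.
  __xray_set_handler(EntryExitHandler);
  __xray_set_customevent_handler(CustomEventHandler);
  __xray_patch(); // rewrite the nop sleds to call into the handlers
  // ... instrumented workload ...
  __xray_unpatch();
  __xray_remove_customevent_handler();
  __xray_remove_handler();
}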
+
+__sanitizer::atomic_uint8_t XRayPatching{0};
using namespace __xray;
@@ -128,30 +152,72 @@ CleanupInvoker<Function> scopeCleanup(Function Fn) XRAY_NEVER_INSTRUMENT {
return CleanupInvoker<Function>{Fn};
}
+inline bool patchSled(const XRaySledEntry &Sled, bool Enable,
+ int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ // While we're here, we should patch the nop sled. To do that we mprotect
+ // the page containing the function to be writeable.
+ const uint64_t PageSize = GetPageSizeCached();
+ void *PageAlignedAddr =
+ reinterpret_cast<void *>(Sled.Address & ~(PageSize - 1));
+ std::size_t MProtectLen = (Sled.Address + cSledLength) -
+ reinterpret_cast<uint64_t>(PageAlignedAddr);
+ MProtectHelper Protector(PageAlignedAddr, MProtectLen);
+ if (Protector.MakeWriteable() == -1) {
+ printf("Failed mprotect: %d\n", errno);
+ return false;
+ }
+
+ bool Success = false;
+ switch (Sled.Kind) {
+ case XRayEntryType::ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
+ break;
+ case XRayEntryType::EXIT:
+ Success = patchFunctionExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::TAIL:
+ Success = patchFunctionTailExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
+ break;
+ case XRayEntryType::CUSTOM_EVENT:
+ Success = patchCustomEvent(Enable, FuncId, Sled);
+ break;
+ default:
+ Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
+ return false;
+ }
+ return Success;
+}
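// --- Editor's sketch (not part of this patch): the page-rounding arithmetic
// patchSled relies on, worked with a hypothetical sled address. mprotect(2)
// needs a page-aligned start, and the region must cover the whole sled even
// when it straddles a page boundary.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t PageSize = 4096;        // stand-in for GetPageSizeCached()
  const uint64_t SledAddress = 0x400ffa; // hypothetical: 6 bytes before a page end
  const uint64_t SledLength = 8;         // cSledLength on x86_64

  const uint64_t PageAlignedAddr = SledAddress & ~(PageSize - 1); // 0x400000
  const uint64_t MProtectLen = (SledAddress + SledLength) - PageAlignedAddr;

  // Prints start=0x400000 len=4098: the sled crosses into the next page, so
  // two pages get remapped writeable.
  std::printf("start=%#llx len=%llu\n",
              static_cast<unsigned long long>(PageAlignedAddr),
              static_cast<unsigned long long>(MProtectLen));
  return 0;
}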
+
// controlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
- if (!XRayInitialized.load(std::memory_order_acquire))
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
- static bool NotPatching = false;
- if (!XRayPatching.compare_exchange_strong(NotPatching, true,
- std::memory_order_acq_rel,
- std::memory_order_acquire)) {
+ uint8_t NotPatching = false;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
return XRayPatchingStatus::ONGOING; // Already patching.
- }
- bool PatchingSuccess = false;
+ uint8_t PatchingSuccess = false;
auto XRayPatchingStatusResetter = scopeCleanup([&PatchingSuccess] {
- if (!PatchingSuccess) {
- XRayPatching.store(false, std::memory_order_release);
- }
+ if (!PatchingSuccess)
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
});
// Step 1: Compute the function id, as a unique identifier per function in the
// instrumentation map.
- XRaySledMap InstrMap = XRayInstrMap.load(std::memory_order_acquire);
+ XRaySledMap InstrMap;
+ {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
if (InstrMap.Entries == 0)
return XRayPatchingStatus::NOT_INITIALIZED;
@@ -172,40 +238,10 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
++FuncId;
CurFun = F;
}
-
- // While we're here, we should patch the nop sled. To do that we mprotect
- // the page containing the function to be writeable.
- void *PageAlignedAddr =
- reinterpret_cast<void *>(Sled.Address & ~(PageSize - 1));
- std::size_t MProtectLen = (Sled.Address + cSledLength) -
- reinterpret_cast<uint64_t>(PageAlignedAddr);
- MProtectHelper Protector(PageAlignedAddr, MProtectLen);
- if (Protector.MakeWriteable() == -1) {
- printf("Failed mprotect: %d\n", errno);
- return XRayPatchingStatus::FAILED;
- }
-
- bool Success = false;
- switch (Sled.Kind) {
- case XRayEntryType::ENTRY:
- Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
- break;
- case XRayEntryType::EXIT:
- Success = patchFunctionExit(Enable, FuncId, Sled);
- break;
- case XRayEntryType::TAIL:
- Success = patchFunctionTailExit(Enable, FuncId, Sled);
- break;
- case XRayEntryType::LOG_ARGS_ENTRY:
- Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
- break;
- default:
- Report("Unsupported sled kind: %d\n", int(Sled.Kind));
- continue;
- }
- (void)Success;
+ patchSled(Sled, Enable, FuncId);
}
- XRayPatching.store(false, std::memory_order_release);
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
PatchingSuccess = true;
return XRayPatchingStatus::SUCCESS;
}
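// --- Editor's sketch (not part of this patch): the ONGOING guard above is a
// try-lock built from a single CAS. The same idiom with std::atomic, shown
// only for illustration; the runtime uses __sanitizer atomics precisely to
// avoid depending on the C++ standard library (note <atomic> is dropped at
// the top of this diff).
#include <atomic>

static std::atomic<bool> Patching{false};

bool TryExclusivePatch() {
  bool NotPatching = false;
  if (!Patching.compare_exchange_strong(NotPatching, true,
                                        std::memory_order_acq_rel))
    return false; // another thread is patching: caller reports ONGOING
  // ... patch or unpatch sleds ...
  Patching.store(false, std::memory_order_release); // release the guard
  return true;
}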
@@ -218,15 +254,94 @@ XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
return controlPatching(false);
}
-int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t))
-{
- if (!XRayInitialized.load(std::memory_order_acquire)) {
- return 0;
+XRayPatchingStatus patchFunction(int32_t FuncId,
+ bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
+ return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
+
+ uint8_t NotPatching = false;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+ return XRayPatchingStatus::ONGOING; // Already patching.
+
+ // Next, we look for the function index.
+ XRaySledMap InstrMap;
+ {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
+
+ // If we don't have an index, we can't patch individual functions.
+ if (InstrMap.Functions == 0)
+ return XRayPatchingStatus::NOT_INITIALIZED;
+
+ // FuncId must be a positive number, less than the number of functions
+ // instrumented.
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
+ Report("Invalid function id provided: %d\n", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ // Now we patch the sleds for this specific function.
+ auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+ auto *f = SledRange.Begin;
+ auto *e = SledRange.End;
+
+ bool SucceedOnce = false;
+ while (f != e)
+ SucceedOnce |= patchSled(*f++, Enable, FuncId);
+
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
+
+ if (!SucceedOnce) {
+ Report("Failed patching any sled for function '%d'.", FuncId);
+ return XRayPatchingStatus::FAILED;
}
+
+ return XRayPatchingStatus::SUCCESS;
+}
+
+XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return patchFunction(FuncId, true);
+}
+
+XRayPatchingStatus
+__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return patchFunction(FuncId, false);
+}
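// --- Editor's sketch (not part of this patch): using the new per-function
// entry points. Function ids are 1-based indices into the instrumentation
// map; __xray_max_function_id() (added at the end of this file) gives the
// upper bound.
#include <cstdint>
#include "xray/xray_interface.h"

bool InstrumentSingleFunction(int32_t FuncId) {
  // SUCCESS means at least one sled of the function was rewritten; any other
  // status (NOT_INITIALIZED, ONGOING, FAILED) leaves it untouched.
  return __xray_patch_function(FuncId) == XRayPatchingStatus::SUCCESS;
}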
+
+int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
+ return 0;
+
// A relaxed write might not be visible even if the current thread gets
// scheduled on a different CPU/NUMA node. We need to wait for everyone to
// have this handler installed for consistency of collected data across CPUs.
- XRayArgLogger.store(Handler, std::memory_order_release);
+ __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uintptr_t>(entry),
+ __sanitizer::memory_order_release);
return 1;
}
+
int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
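// --- Editor's sketch (not part of this patch): an arg1-aware handler.
// Functions carrying the [[clang::xray_log_args(1)]] attribute get the
// LOG_ARGS_ENTRY sled, which routes their first argument through
// XRayArgLogger to the handler installed here.
#include <cstdint>
#include "xray/xray_interface.h"

void Arg1Handler(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
  // Arg1 is the instrumented function's first argument, reinterpreted as a
  // 64-bit integer; pointer arguments arrive as their address.
  (void)FuncId;
  (void)Type;
  (void)Arg1;
}

int InstallArg1Handler() { return __xray_set_handler_arg1(Arg1Handler); }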
+
+uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
+ return 0;
+ return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
+// On PPC, function entries are always aligned to 16 bytes. The beginning of a
+// sled might be a local entry, which is always +8 based on the global entry.
+// Always return the global entry.
+#ifdef __PPC__
+ & ~0xf
+#endif
+ ;
+}
+
+size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ return XRayInstrMap.Functions;
+}