author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/third_party/abseil-cpp/absl/synchronization
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/abseil-cpp/absl/synchronization')
 chromium/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc | 20 ++++----------------
 chromium/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc         |  8 ++++++++
 chromium/third_party/abseil-cpp/absl/synchronization/mutex.cc                   | 57 +++++++++++++++++++++++++++++++------------------------------
3 files changed, 39 insertions(+), 46 deletions(-)
diff --git a/chromium/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc b/chromium/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc
index a1502e727da..d83bc8a94c7 100644
--- a/chromium/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc
+++ b/chromium/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc
@@ -209,31 +209,22 @@ class SynchronizationStorage {
   // Instances allocated on the heap or on the stack should use the default
   // constructor.
   SynchronizationStorage()
-      : is_dynamic_(true), once_() {}
-
-  // Instances allocated in static storage (not on the heap, not on the
-  // stack) should use this constructor.
-  explicit SynchronizationStorage(base_internal::LinkerInitialized) {}
+      : destruct_(true), once_() {}
 
   constexpr explicit SynchronizationStorage(absl::ConstInitType)
-      : is_dynamic_(false), once_(), space_{{0}} {}
+      : destruct_(false), once_(), space_{{0}} {}
 
   SynchronizationStorage(SynchronizationStorage&) = delete;
   SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
 
   ~SynchronizationStorage() {
-    if (is_dynamic_) {
+    if (destruct_) {
       get()->~T();
     }
   }
 
   // Retrieve the object in storage. This is fast and thread safe, but does
   // incur the cost of absl::call_once().
-  //
-  // For instances in static storage constructed with the
-  // LinkerInitialized constructor, may be called at any time without
-  // regard for order of dynamic initialization or destruction of objects
-  // in static storage. See the class comment for caveats.
   T* get() {
     absl::call_once(once_, SynchronizationStorage::Construct, this);
     return reinterpret_cast<T*>(&space_);
@@ -245,10 +236,7 @@ class SynchronizationStorage {
   }
 
   // When true, T's destructor is run when this is destructed.
-  //
-  // The LinkerInitialized constructor assumes this value will be set
-  // false by static initialization.
-  bool is_dynamic_;
+  const bool destruct_;
 
   absl::once_flag once_;
 
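The mutex_nonprod.inc change retires the legacy base_internal::LinkerInitialized constructor: static instances are now expected to use the constexpr absl::ConstInitType constructor, and the member is renamed from is_dynamic_ to const bool destruct_ so the name states what it controls (whether ~T() runs on destruction) rather than how the instance was allocated. The lazy-construction pattern itself is unchanged. Below is a minimal self-contained sketch of that pattern; the LazyStorage name and ConstInitTag type are hypothetical, and std::call_once stands in for Abseil's internal once machinery:

#include <mutex>  // std::once_flag, std::call_once
#include <new>    // placement new

// Hypothetical sketch of the SynchronizationStorage pattern: construct a T
// in raw storage on first use; run ~T() only for non-static instances.
template <typename T>
class LazyStorage {
 public:
  struct ConstInitTag {};

  // Heap/stack instances: destroy the T when the wrapper is destroyed.
  LazyStorage() : destruct_(true) {}

  // Static-storage instances: constant-initialized, never destructed, so
  // the wrapped object stays usable even during program shutdown.
  constexpr explicit LazyStorage(ConstInitTag)
      : destruct_(false), once_(), space_{} {}

  LazyStorage(const LazyStorage&) = delete;
  LazyStorage& operator=(const LazyStorage&) = delete;

  ~LazyStorage() {
    if (destruct_) get()->~T();
  }

  // First caller constructs the T; every caller pays one call_once check.
  T* get() {
    std::call_once(once_, [this] { ::new (static_cast<void*>(&space_)) T(); });
    return reinterpret_cast<T*>(&space_);
  }

 private:
  const bool destruct_;
  std::once_flag once_;
  alignas(T) unsigned char space_[sizeof(T)];
};

The point of the destruct_ == false path is that a static instance is never torn down, so code that runs during program exit can still safely call get().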
diff --git a/chromium/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc b/chromium/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc
index 2949f5a84c8..b6150b9b2bf 100644
--- a/chromium/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc
+++ b/chromium/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc
@@ -86,6 +86,14 @@ static void MaybeBecomeIdle() {
 #endif
 #endif
 
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
 class Futex {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
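The waiter.cc guards target 32-bit Linux platforms whose C library uses a 64-bit time_t (riscv32 is the usual example): there the kernel provides only __NR_futex_time64, and SYS_futex may be missing from the system headers, so the aliases let the existing syscall(SYS_futex, ...) call sites keep compiling. A hedged sketch of such call sites, with hypothetical FutexWait/FutexWake helpers; Abseil's real Waiter additionally handles timeouts, where the time64 syscall also expects the kernel's 64-bit timespec layout:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cerrno>
#include <cstdint>

// Same fallback the patch adds: on 32-bit targets with 64-bit time_t,
// only the time64 futex syscall number exists.
#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
#define SYS_futex_time64 __NR_futex_time64
#endif
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
#endif

// Hypothetical helper: block until *v != val or the thread is woken or
// interrupted. Passing a null timeout sidesteps the timespec ABI difference
// between SYS_futex and SYS_futex_time64; a timed wait must marshal the
// timeout into the layout matching the syscall number actually used.
int FutexWait(std::atomic<int32_t>* v, int32_t val) {
  long rc = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                    FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val,
                    /*timeout=*/nullptr);
  return rc == 0 ? 0 : -errno;  // -EAGAIN if *v != val on entry
}

// Hypothetical helper: wake up to `count` waiters blocked on v.
int FutexWake(std::atomic<int32_t>* v, int32_t count) {
  long rc = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
  return rc >= 0 ? static_cast<int>(rc) : -errno;
}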
diff --git a/chromium/third_party/abseil-cpp/absl/synchronization/mutex.cc b/chromium/third_party/abseil-cpp/absl/synchronization/mutex.cc
index 44ec15c3cc8..c7968f06bbd 100644
--- a/chromium/third_party/abseil-cpp/absl/synchronization/mutex.cc
+++ b/chromium/third_party/abseil-cpp/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
@@ -85,28 +86,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
     kDeadlockDetectionDefault);
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
 
-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
-  MutexGlobals() {
-    // Find machine-specific data needed for Delay() and
-    // TryAcquireWithSpinning(). This runs in the global constructor
-    // sequence, and before that zeros are safe values.
-    num_cpus = absl::base_internal::NumCPUs();
-    spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-  }
-  int num_cpus;
-  int spinloop_iterations;
-  // Pad this struct to a full cacheline to prevent false sharing.
-  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
-    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
-    "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
     submit_profile_data;
@@ -143,7 +122,22 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
   symbolizer.Store(fn);
 }
 
-// spinlock delay on iteration c.  Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+  absl::once_flag once;
+  int num_cpus = 0;
+  int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+  ABSL_CONST_INIT static MutexGlobals data;
+  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+    data.num_cpus = absl::base_internal::NumCPUs();
+    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+  });
+  return data;
+}
+
+// Spinlock delay on iteration c.  Returns new c.
 namespace {
 enum DelayMode { AGGRESSIVE, GENTLE };
 };
@@ -153,22 +147,25 @@ static int Delay(int32_t c, DelayMode mode) {
   // If this a uniprocessor, only yield/sleep.  Otherwise, if the mode is
   // aggressive then spin many times before yielding.  If the mode is
   // gentle then spin only a few times before yielding.  Aggressive spinning is
   // used to ensure that an Unlock() call, which must get the spin lock for
   // any thread to make progress gets it without undue delay.
-  int32_t limit = (mutex_globals.num_cpus > 1) ?
-      ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+  const int32_t limit =
+      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
   if (c < limit) {
-    c++;  // spin
+    // Spin.
+    c++;
   } else {
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {  // yield once
+    if (c == limit) {
+      // Yield once.
       AbslInternalMutexYield();
       c++;
-    } else {          // then wait
+    } else {
+      // Then wait.
       absl::SleepFor(absl::Microseconds(10));
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
   }
-  return (c);
+  return c;
 }
 
 // --------------------------Generic atomic ops
@@ -1437,7 +1434,7 @@ void Mutex::AssertNotHeld() const {
 // Attempt to acquire *mu, and return whether successful.   The implementation
 // may spin for a short while if the lock cannot be acquired immediately.
 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = mutex_globals.spinloop_iterations;
+  int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
     if ((v & (kMuReader|kMuEvent)) != 0) {
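The mutex.cc change replaces a global constructor with lazy initialization: the old MutexGlobals ran NumCPUs() during static initialization, with hand-computed cacheline padding and a static_assert to match, while the new code constant-initializes the struct and fills it in on first use via LowLevelCallOnce, presumably so the once-primitive does not itself depend on the Mutex machinery this file implements. ABSL_CACHELINE_ALIGNED on the struct still prevents false sharing, without the padding arithmetic. A portable sketch of the same pattern using only the standard library; the 64-byte alignment is an assumption standing in for ABSL_CACHELINE_ALIGNED, and std::call_once stands in for LowLevelCallOnce:

#include <mutex>
#include <thread>

// Sketch: read-mostly globals, cacheline-aligned against false sharing and
// initialized exactly once on first use instead of in a global constructor.
// alignas(64) is an assumption; the real macro queries the target cacheline.
struct alignas(64) MutexGlobals {
  std::once_flag once;
  int num_cpus = 0;
  int spinloop_iterations = 0;
};

const MutexGlobals& GetMutexGlobals() {
  static MutexGlobals data;  // constant-initialized; no init-order races
  std::call_once(data.once, [] {
    // hardware_concurrency() may report 0 when unknown; treat as one CPU.
    const unsigned n = std::thread::hardware_concurrency();
    data.num_cpus = n != 0 ? static_cast<int>(n) : 1;
    // Spinning only pays off when another core can release the lock.
    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
  });
  return data;
}

Callers such as the patched TryAcquireWithSpinning() then read GetMutexGlobals().spinloop_iterations, paying the initialization cost on first contention instead of at program startup.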