summaryrefslogtreecommitdiff
path: root/Source/bmalloc
diff options
context:
space:
mode:
Diffstat (limited to 'Source/bmalloc')
-rw-r--r--Source/bmalloc/CMakeLists.txt36
-rw-r--r--Source/bmalloc/bmalloc/Algorithm.h128
-rw-r--r--Source/bmalloc/bmalloc/Allocator.cpp202
-rw-r--r--Source/bmalloc/bmalloc/Allocator.h95
-rw-r--r--Source/bmalloc/bmalloc/AsyncTask.h137
-rw-r--r--Source/bmalloc/bmalloc/BAssert.h109
-rw-r--r--Source/bmalloc/bmalloc/BPlatform.h196
-rw-r--r--Source/bmalloc/bmalloc/BumpAllocator.h98
-rw-r--r--Source/bmalloc/bmalloc/BumpRange.h44
-rw-r--r--Source/bmalloc/bmalloc/Cache.cpp84
-rw-r--r--Source/bmalloc/bmalloc/Cache.h117
-rw-r--r--Source/bmalloc/bmalloc/Chunk.h153
-rw-r--r--Source/bmalloc/bmalloc/Deallocator.cpp101
-rw-r--r--Source/bmalloc/bmalloc/Deallocator.h80
-rw-r--r--Source/bmalloc/bmalloc/DebugHeap.cpp111
-rw-r--r--Source/bmalloc/bmalloc/DebugHeap.h51
-rw-r--r--Source/bmalloc/bmalloc/Environment.cpp126
-rw-r--r--Source/bmalloc/bmalloc/Environment.h45
-rw-r--r--Source/bmalloc/bmalloc/FixedVector.h119
-rw-r--r--Source/bmalloc/bmalloc/Heap.cpp430
-rw-r--r--Source/bmalloc/bmalloc/Heap.h153
-rw-r--r--Source/bmalloc/bmalloc/Inline.h33
-rw-r--r--Source/bmalloc/bmalloc/LargeMap.cpp79
-rw-r--r--Source/bmalloc/bmalloc/LargeMap.h47
-rw-r--r--Source/bmalloc/bmalloc/LargeRange.h110
-rw-r--r--Source/bmalloc/bmalloc/LineMetadata.h46
-rw-r--r--Source/bmalloc/bmalloc/List.h102
-rw-r--r--Source/bmalloc/bmalloc/Logging.cpp65
-rw-r--r--Source/bmalloc/bmalloc/Logging.h38
-rw-r--r--Source/bmalloc/bmalloc/Map.h134
-rw-r--r--Source/bmalloc/bmalloc/Mutex.h48
-rw-r--r--Source/bmalloc/bmalloc/Object.h81
-rw-r--r--Source/bmalloc/bmalloc/ObjectType.cpp49
-rw-r--r--Source/bmalloc/bmalloc/ObjectType.h45
-rw-r--r--Source/bmalloc/bmalloc/PerProcess.h110
-rw-r--r--Source/bmalloc/bmalloc/PerThread.h148
-rw-r--r--Source/bmalloc/bmalloc/Range.h73
-rw-r--r--Source/bmalloc/bmalloc/ScopeExit.h54
-rw-r--r--Source/bmalloc/bmalloc/Sizes.h130
-rw-r--r--Source/bmalloc/bmalloc/SmallLine.h69
-rw-r--r--Source/bmalloc/bmalloc/SmallPage.h87
-rw-r--r--Source/bmalloc/bmalloc/StaticMutex.cpp53
-rw-r--r--Source/bmalloc/bmalloc/StaticMutex.h103
-rw-r--r--Source/bmalloc/bmalloc/Syscall.h35
-rw-r--r--Source/bmalloc/bmalloc/VMAllocate.h230
-rw-r--r--Source/bmalloc/bmalloc/VMHeap.cpp101
-rw-r--r--Source/bmalloc/bmalloc/VMHeap.h86
-rw-r--r--Source/bmalloc/bmalloc/Vector.h237
-rw-r--r--Source/bmalloc/bmalloc/Zone.cpp132
-rw-r--r--Source/bmalloc/bmalloc/Zone.h73
-rw-r--r--Source/bmalloc/bmalloc/bmalloc.h97
-rw-r--r--Source/bmalloc/bmalloc/darwin/BSoftLinking.h66
-rw-r--r--Source/bmalloc/bmalloc/mbmalloc.cpp63
53 files changed, 5439 insertions, 0 deletions
diff --git a/Source/bmalloc/CMakeLists.txt b/Source/bmalloc/CMakeLists.txt
new file mode 100644
index 000000000..7e3139aaa
--- /dev/null
+++ b/Source/bmalloc/CMakeLists.txt
@@ -0,0 +1,36 @@
+set_property(DIRECTORY . PROPERTY FOLDER "bmalloc")
+
+set(bmalloc_INCLUDE_DIRECTORIES
+ "${BMALLOC_DIR}"
+)
+
+set(bmalloc_SOURCES
+ bmalloc/Allocator.cpp
+ bmalloc/Cache.cpp
+ bmalloc/Deallocator.cpp
+ bmalloc/DebugHeap.cpp
+ bmalloc/Environment.cpp
+ bmalloc/Heap.cpp
+ bmalloc/LargeMap.cpp
+ bmalloc/Logging.cpp
+ bmalloc/ObjectType.cpp
+ bmalloc/StaticMutex.cpp
+ bmalloc/VMHeap.cpp
+ bmalloc/mbmalloc.cpp
+)
+
+if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
+ list(APPEND bmalloc_SOURCES
+ bmalloc/Zone.cpp
+ )
+endif ()
+
+set(bmalloc_LIBRARIES
+ ${CMAKE_DL_LIBS}
+)
+
+WEBKIT_WRAP_SOURCELIST(${bmalloc_SOURCES})
+include_directories(${bmalloc_INCLUDE_DIRECTORIES})
+add_library(bmalloc STATIC ${bmalloc_SOURCES})
+target_link_libraries(bmalloc ${bmalloc_LIBRARIES})
+set_target_properties(bmalloc PROPERTIES COMPILE_DEFINITIONS "BUILDING_bmalloc")
diff --git a/Source/bmalloc/bmalloc/Algorithm.h b/Source/bmalloc/bmalloc/Algorithm.h
new file mode 100644
index 000000000..6ac792ece
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Algorithm.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Algorithm_h
+#define Algorithm_h
+
+#include "Algorithm.h"
+#include "BAssert.h"
+#include <algorithm>
+#include <cstdint>
+#include <cstddef>
+#include <limits>
+#include <type_traits>
+#include <chrono>
+
+namespace bmalloc {
+
+// Versions of min and max that are compatible with compile-time constants.
+template<typename T> inline constexpr T max(T a, T b)
+{
+ return a > b ? a : b;
+}
+
+template<typename T> inline constexpr T min(T a, T b)
+{
+ return a < b ? a : b;
+}
+
+template<typename T> inline constexpr T mask(T value, uintptr_t mask)
+{
+ return reinterpret_cast<T>(reinterpret_cast<uintptr_t>(value) & mask);
+}
+
+template<typename T> inline constexpr bool test(T value, uintptr_t mask)
+{
+ return !!(reinterpret_cast<uintptr_t>(value) & mask);
+}
+
+inline constexpr bool isPowerOfTwo(size_t size)
+{
+ return size && !(size & (size - 1));
+}
+
+template<typename T> inline T roundUpToMultipleOf(size_t divisor, T x)
+{
+ BASSERT(isPowerOfTwo(divisor));
+ return reinterpret_cast<T>((reinterpret_cast<uintptr_t>(x) + (divisor - 1)) & ~(divisor - 1));
+}
+
+template<size_t divisor, typename T> inline constexpr T roundUpToMultipleOf(T x)
+{
+ static_assert(isPowerOfTwo(divisor), "'divisor' must be a power of two.");
+ return roundUpToMultipleOf(divisor, x);
+}
+
+template<typename T> inline T roundDownToMultipleOf(size_t divisor, T x)
+{
+ BASSERT(isPowerOfTwo(divisor));
+ return reinterpret_cast<T>(mask(reinterpret_cast<uintptr_t>(x), ~(divisor - 1ul)));
+}
+
+template<size_t divisor, typename T> inline constexpr T roundDownToMultipleOf(T x)
+{
+ static_assert(isPowerOfTwo(divisor), "'divisor' must be a power of two.");
+ return roundDownToMultipleOf(divisor, x);
+}
+
+template<typename T> inline void divideRoundingUp(T numerator, T denominator, T& quotient, T& remainder)
+{
+ // We expect the compiler to emit a single divide instruction to extract both the quotient and the remainder.
+ quotient = numerator / denominator;
+ remainder = numerator % denominator;
+ if (remainder)
+ quotient += 1;
+}
+
+template<typename T> inline T divideRoundingUp(T numerator, T denominator)
+{
+ return (numerator + denominator - 1) / denominator;
+}
+
+template<typename T> inline T roundUpToMultipleOfNonPowerOfTwo(size_t divisor, T x)
+{
+ return divideRoundingUp(x, divisor) * divisor;
+}
+
+// Version of sizeof that returns 0 for empty classes.
+
+template<typename T> inline constexpr size_t sizeOf()
+{
+ return std::is_empty<T>::value ? 0 : sizeof(T);
+}
+
+template<typename T> inline constexpr size_t bitCount()
+{
+ return sizeof(T) * 8;
+}
+
+inline constexpr unsigned long log2(unsigned long value)
+{
+ return bitCount<unsigned long>() - 1 - __builtin_clzl(value);
+}
+
+} // namespace bmalloc
+
+#endif // Algorithm_h
diff --git a/Source/bmalloc/bmalloc/Allocator.cpp b/Source/bmalloc/bmalloc/Allocator.cpp
new file mode 100644
index 000000000..9ac72d34e
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Allocator.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Allocator.h"
+#include "BAssert.h"
+#include "Chunk.h"
+#include "Deallocator.h"
+#include "DebugHeap.h"
+#include "Heap.h"
+#include "PerProcess.h"
+#include "Sizes.h"
+#include <algorithm>
+#include <cstdlib>
+
+using namespace std;
+
+namespace bmalloc {
+
+Allocator::Allocator(Heap* heap, Deallocator& deallocator)
+ : m_debugHeap(heap->debugHeap())
+ , m_deallocator(deallocator)
+{
+ for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass)
+ m_bumpAllocators[sizeClass].init(objectSize(sizeClass));
+}
+
+Allocator::~Allocator()
+{
+ scavenge();
+}
+
+void* Allocator::tryAllocate(size_t size)
+{
+ if (m_debugHeap)
+ return m_debugHeap->malloc(size);
+
+ if (size <= smallMax)
+ return allocate(size);
+
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ return PerProcess<Heap>::getFastCase()->tryAllocateLarge(lock, alignment, size);
+}
+
+void* Allocator::allocate(size_t alignment, size_t size)
+{
+ bool crashOnFailure = true;
+ return allocateImpl(alignment, size, crashOnFailure);
+}
+
+void* Allocator::tryAllocate(size_t alignment, size_t size)
+{
+ bool crashOnFailure = false;
+ return allocateImpl(alignment, size, crashOnFailure);
+}
+
+void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
+{
+ BASSERT(isPowerOfTwo(alignment));
+
+ if (m_debugHeap)
+ return m_debugHeap->memalign(alignment, size, crashOnFailure);
+
+ if (!size)
+ size = alignment;
+
+ if (size <= smallMax && alignment <= smallMax)
+ return allocate(roundUpToMultipleOf(alignment, size));
+
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ Heap* heap = PerProcess<Heap>::getFastCase();
+ if (crashOnFailure)
+ return heap->allocateLarge(lock, alignment, size);
+ return heap->tryAllocateLarge(lock, alignment, size);
+}
+
+void* Allocator::reallocate(void* object, size_t newSize)
+{
+ if (m_debugHeap)
+ return m_debugHeap->realloc(object, newSize);
+
+ size_t oldSize = 0;
+ switch (objectType(object)) {
+ case ObjectType::Small: {
+ BASSERT(objectType(nullptr) == ObjectType::Small);
+ if (!object)
+ break;
+
+ size_t sizeClass = Object(object).page()->sizeClass();
+ oldSize = objectSize(sizeClass);
+ break;
+ }
+ case ObjectType::Large: {
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ oldSize = PerProcess<Heap>::getFastCase()->largeSize(lock, object);
+
+ if (newSize < oldSize && newSize > smallMax) {
+ PerProcess<Heap>::getFastCase()->shrinkLarge(lock, Range(object, oldSize), newSize);
+ return object;
+ }
+ break;
+ }
+ }
+
+ void* result = allocate(newSize);
+ size_t copySize = std::min(oldSize, newSize);
+ memcpy(result, object, copySize);
+ m_deallocator.deallocate(object);
+ return result;
+}
+
+void Allocator::scavenge()
+{
+ for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
+ BumpAllocator& allocator = m_bumpAllocators[sizeClass];
+ BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
+
+ while (allocator.canAllocate())
+ m_deallocator.deallocate(allocator.allocate());
+
+ while (bumpRangeCache.size()) {
+ allocator.refill(bumpRangeCache.pop());
+ while (allocator.canAllocate())
+ m_deallocator.deallocate(allocator.allocate());
+ }
+
+ allocator.clear();
+ }
+}
+
+NO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass)
+{
+ BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
+
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ m_deallocator.processObjectLog(lock);
+ PerProcess<Heap>::getFastCase()->allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache);
+}
+
+INLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass)
+{
+ BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
+ if (!bumpRangeCache.size())
+ return refillAllocatorSlowCase(allocator, sizeClass);
+ return allocator.refill(bumpRangeCache.pop());
+}
+
+NO_INLINE void* Allocator::allocateLarge(size_t size)
+{
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size);
+}
+
+NO_INLINE void* Allocator::allocateLogSizeClass(size_t size)
+{
+ size_t sizeClass = bmalloc::sizeClass(size);
+ BumpAllocator& allocator = m_bumpAllocators[sizeClass];
+ if (!allocator.canAllocate())
+ refillAllocator(allocator, sizeClass);
+ return allocator.allocate();
+}
+
+void* Allocator::allocateSlowCase(size_t size)
+{
+ if (m_debugHeap)
+ return m_debugHeap->malloc(size);
+
+ if (size <= maskSizeClassMax) {
+ size_t sizeClass = bmalloc::maskSizeClass(size);
+ BumpAllocator& allocator = m_bumpAllocators[sizeClass];
+ refillAllocator(allocator, sizeClass);
+ return allocator.allocate();
+ }
+
+ if (size <= smallMax)
+ return allocateLogSizeClass(size);
+
+ return allocateLarge(size);
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Allocator.h b/Source/bmalloc/bmalloc/Allocator.h
new file mode 100644
index 000000000..1d5022b97
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Allocator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Allocator_h
+#define Allocator_h
+
+#include "BumpAllocator.h"
+#include <array>
+
+namespace bmalloc {
+
+class Deallocator;
+class DebugHeap;
+class Heap;
+
+// Per-cache object allocator.
+
+class Allocator {
+public:
+ Allocator(Heap*, Deallocator&);
+ ~Allocator();
+
+ void* tryAllocate(size_t);
+ void* allocate(size_t);
+ void* tryAllocate(size_t alignment, size_t);
+ void* allocate(size_t alignment, size_t);
+ void* reallocate(void*, size_t);
+
+ void scavenge();
+
+private:
+ void* allocateImpl(size_t alignment, size_t, bool crashOnFailure);
+
+ bool allocateFastCase(size_t, void*&);
+ void* allocateSlowCase(size_t);
+
+ void* allocateLogSizeClass(size_t);
+ void* allocateLarge(size_t);
+
+ void refillAllocator(BumpAllocator&, size_t sizeClass);
+ void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass);
+
+ std::array<BumpAllocator, sizeClassCount> m_bumpAllocators;
+ std::array<BumpRangeCache, sizeClassCount> m_bumpRangeCaches;
+
+ DebugHeap* m_debugHeap;
+ Deallocator& m_deallocator;
+};
+
+inline bool Allocator::allocateFastCase(size_t size, void*& object)
+{
+ if (size > maskSizeClassMax)
+ return false;
+
+ BumpAllocator& allocator = m_bumpAllocators[maskSizeClass(size)];
+ if (!allocator.canAllocate())
+ return false;
+
+ object = allocator.allocate();
+ return true;
+}
+
+inline void* Allocator::allocate(size_t size)
+{
+ void* object;
+ if (!allocateFastCase(size, object))
+ return allocateSlowCase(size);
+ return object;
+}
+
+} // namespace bmalloc
+
+#endif // Allocator_h
diff --git a/Source/bmalloc/bmalloc/AsyncTask.h b/Source/bmalloc/bmalloc/AsyncTask.h
new file mode 100644
index 000000000..fa222fe25
--- /dev/null
+++ b/Source/bmalloc/bmalloc/AsyncTask.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AsyncTask_h
+#define AsyncTask_h
+
+#include "BAssert.h"
+#include "Inline.h"
+#include "Mutex.h"
+#include <atomic>
+#include <condition_variable>
+#include <thread>
+
+namespace bmalloc {
+
+template<typename Object, typename Function>
+class AsyncTask {
+public:
+ AsyncTask(Object&, const Function&);
+ ~AsyncTask();
+
+ void run();
+
+private:
+ enum State { Sleeping, Running, RunRequested };
+
+ void runSlowCase();
+
+ static void threadEntryPoint(AsyncTask*);
+ void threadRunLoop();
+
+ std::atomic<State> m_state;
+
+ Mutex m_conditionMutex;
+ std::condition_variable_any m_condition;
+
+ std::thread m_thread;
+
+ Object& m_object;
+ Function m_function;
+};
+
+template<typename Object, typename Function>
+AsyncTask<Object, Function>::AsyncTask(Object& object, const Function& function)
+ : m_state(Running)
+ , m_condition()
+ , m_thread(std::thread(&AsyncTask::threadEntryPoint, this))
+ , m_object(object)
+ , m_function(function)
+{
+}
+
+template<typename Object, typename Function>
+AsyncTask<Object, Function>::~AsyncTask()
+{
+ // We'd like to mark our destructor deleted but C++ won't allow it because
+ // we are an automatic member of Heap.
+ RELEASE_BASSERT(0);
+}
+
+template<typename Object, typename Function>
+inline void AsyncTask<Object, Function>::run()
+{
+ if (m_state == RunRequested)
+ return;
+ runSlowCase();
+}
+
+template<typename Object, typename Function>
+NO_INLINE void AsyncTask<Object, Function>::runSlowCase()
+{
+ State oldState = m_state.exchange(RunRequested);
+ if (oldState == RunRequested || oldState == Running)
+ return;
+
+ BASSERT(oldState == Sleeping);
+ std::lock_guard<Mutex> lock(m_conditionMutex);
+ m_condition.notify_all();
+}
+
+template<typename Object, typename Function>
+void AsyncTask<Object, Function>::threadEntryPoint(AsyncTask* asyncTask)
+{
+#if BOS(DARWIN)
+ pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
+#endif
+
+ asyncTask->threadRunLoop();
+}
+
+template<typename Object, typename Function>
+void AsyncTask<Object, Function>::threadRunLoop()
+{
+ // This loop ratchets downward from most active to least active state. While
+ // we ratchet downward, any other thread may reset our state.
+
+ // We require any state change while we are sleeping to signal to our
+ // condition variable and wake us up.
+
+ while (1) {
+ State expectedState = RunRequested;
+ if (m_state.compare_exchange_weak(expectedState, Running))
+ (m_object.*m_function)();
+
+ expectedState = Running;
+ if (m_state.compare_exchange_weak(expectedState, Sleeping)) {
+ std::unique_lock<Mutex> lock(m_conditionMutex);
+ m_condition.wait(lock, [&]() { return m_state != Sleeping; });
+ }
+ }
+}
+
+} // namespace bmalloc
+
+#endif // AsyncTask_h
diff --git a/Source/bmalloc/bmalloc/BAssert.h b/Source/bmalloc/bmalloc/BAssert.h
new file mode 100644
index 000000000..e162ac4ae
--- /dev/null
+++ b/Source/bmalloc/bmalloc/BAssert.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BPlatform.h"
+#include "Logging.h"
+
+#if BUSE(OS_LOG)
+#include <os/log.h>
+#endif
+
+#if defined(NDEBUG) && BOS(DARWIN)
+
+#if BCPU(X86_64) || BCPU(X86)
+#define BBreakpointTrap() __asm__ volatile ("int3")
+#elif BCPU(ARM_THUMB2)
+#define BBreakpointTrap() __asm__ volatile ("bkpt #0")
+#elif BCPU(ARM64)
+#define BBreakpointTrap() __asm__ volatile ("brk #0")
+#else
+#error "Unsupported CPU".
+#endif
+
+// Crash with a SIGTRAP i.e EXC_BREAKPOINT.
+// We are not using __builtin_trap because it is only guaranteed to abort, but not necessarily
+// trigger a SIGTRAP. Instead, we use inline asm to ensure that we trigger the SIGTRAP.
+#define BCRASH() do { \
+ BBreakpointTrap(); \
+ __builtin_unreachable(); \
+ } while (false)
+
+#else // not defined(NDEBUG) && BOS(DARWIN)
+
+#define BCRASH() do { \
+ *(int*)0xbbadbeef = 0; \
+} while (0);
+
+#endif // defined(NDEBUG) && BOS(DARWIN)
+
+#define BASSERT_IMPL(x) do { \
+ if (!(x)) \
+ BCRASH(); \
+} while (0);
+
+#define RELEASE_BASSERT(x) BASSERT_IMPL(x)
+
+#if BUSE(OS_LOG)
+#define BMALLOC_LOGGING_PREFIX "bmalloc: "
+#define BLOG_ERROR(format, ...) os_log_error(OS_LOG_DEFAULT, BMALLOC_LOGGING_PREFIX format, __VA_ARGS__)
+#else
+#define BLOG_ERROR(format, ...) bmalloc::reportAssertionFailureWithMessage(__FILE__, __LINE__, __PRETTY_FUNCTION__, format, __VA_ARGS__)
+#endif
+
+#if defined(NDEBUG)
+#define RELEASE_BASSERT_WITH_MESSAGE(x, format, ...) BASSERT_IMPL(x)
+#else
+#define RELEASE_BASSERT_WITH_MESSAGE(x, format, ...) do { \
+ if (!(x)) { \
+ BLOG_ERROR("ASSERTION FAILED: " #x " :: " format, ##__VA_ARGS__); \
+ BCRASH(); \
+ } \
+} while (0);
+#endif
+
+#define UNUSED(x) ((void)x)
+
+// ===== Release build =====
+
+#if defined(NDEBUG)
+
+#define BASSERT(x)
+
+#define IF_DEBUG(x)
+
+#endif // defined(NDEBUG)
+
+
+// ===== Debug build =====
+
+#if !defined(NDEBUG)
+
+#define BASSERT(x) BASSERT_IMPL(x)
+
+#define IF_DEBUG(x) (x)
+
+#endif // !defined(NDEBUG)
diff --git a/Source/bmalloc/bmalloc/BPlatform.h b/Source/bmalloc/bmalloc/BPlatform.h
new file mode 100644
index 000000000..8d768db63
--- /dev/null
+++ b/Source/bmalloc/bmalloc/BPlatform.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#ifdef __APPLE__
+#include <Availability.h>
+#include <AvailabilityMacros.h>
+#include <TargetConditionals.h>
+#endif
+
+#define BPLATFORM(PLATFORM) (defined BPLATFORM_##PLATFORM && BPLATFORM_##PLATFORM)
+#define BOS(OS) (defined BOS_##OS && BOS_##OS)
+
+#ifdef __APPLE__
+#define BOS_DARWIN 1
+#endif
+
+#ifdef __unix
+#define BOS_UNIX 1
+#endif
+
+#if BOS(DARWIN) && ((defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) \
+ || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) \
+ || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR))
+#define BPLATFORM_IOS 1
+#elif BOS(DARWIN) && defined(TARGET_OS_MAC) && TARGET_OS_MAC
+#define BPLATFORM_MAC 1
+#endif
+
+/* ==== Policy decision macros: these define policy choices for a particular port. ==== */
+
+/* BUSE() - use a particular third-party library or optional OS service */
+#define BUSE(FEATURE) (defined BUSE_##FEATURE && BUSE_##FEATURE)
+
+/* ==== Platform adaptation macros: these describe properties of the target environment. ==== */
+
+/* BCPU() - the target CPU architecture */
+#define BCPU(_FEATURE) (defined BCPU_##_FEATURE && BCPU_##_FEATURE)
+
+/* BCPU(X86) - i386 / x86 32-bit */
+#if defined(__i386__) \
+|| defined(i386) \
+|| defined(_M_IX86) \
+|| defined(_X86_) \
+|| defined(__THW_INTEL)
+#define BCPU_X86 1
+#endif
+
+/* BCPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */
+#if defined(__x86_64__) \
+|| defined(_M_X64)
+#define BCPU_X86_64 1
+#endif
+
+/* BCPU(ARM64) - Apple */
+#if (defined(__arm64__) && defined(__APPLE__)) || defined(__aarch64__)
+#define BCPU_ARM64 1
+#endif
+
+/* BCPU(ARM) - ARM, any version*/
+#define BARM_ARCH_AT_LEAST(N) (BCPU(ARM) && BARM_ARCH_VERSION >= N)
+
+#if defined(arm) \
+|| defined(__arm__) \
+|| defined(ARM) \
+|| defined(_ARM_)
+#define BCPU_ARM 1
+
+/* Set BARM_ARCH_VERSION */
+#if defined(__ARM_ARCH_4__) \
+|| defined(__ARM_ARCH_4T__) \
+|| defined(__MARM_ARMV4__)
+#define BARM_ARCH_VERSION 4
+
+#elif defined(__ARM_ARCH_5__) \
+|| defined(__ARM_ARCH_5T__) \
+|| defined(__MARM_ARMV5__)
+#define BARM_ARCH_VERSION 5
+
+#elif defined(__ARM_ARCH_5E__) \
+|| defined(__ARM_ARCH_5TE__) \
+|| defined(__ARM_ARCH_5TEJ__)
+#define BARM_ARCH_VERSION 5
+
+#elif defined(__ARM_ARCH_6__) \
+|| defined(__ARM_ARCH_6J__) \
+|| defined(__ARM_ARCH_6K__) \
+|| defined(__ARM_ARCH_6Z__) \
+|| defined(__ARM_ARCH_6ZK__) \
+|| defined(__ARM_ARCH_6T2__) \
+|| defined(__ARMV6__)
+#define BARM_ARCH_VERSION 6
+
+#elif defined(__ARM_ARCH_7A__) \
+|| defined(__ARM_ARCH_7K__) \
+|| defined(__ARM_ARCH_7R__) \
+|| defined(__ARM_ARCH_7S__)
+#define BARM_ARCH_VERSION 7
+
+#elif defined(__ARM_ARCH_8__)
+#define BARM_ARCH_VERSION 8
+
+/* MSVC sets _M_ARM */
+#elif defined(_M_ARM)
+#define BARM_ARCH_VERSION _M_ARM
+
+/* RVCT sets _TARGET_ARCH_ARM */
+#elif defined(__TARGET_ARCH_ARM)
+#define BARM_ARCH_VERSION __TARGET_ARCH_ARM
+
+#else
+#define WTF_ARM_ARCH_VERSION 0
+
+#endif
+
+/* Set BTHUMB_ARCH_VERSION */
+#if defined(__ARM_ARCH_4T__)
+#define BTHUMB_ARCH_VERSION 1
+
+#elif defined(__ARM_ARCH_5T__) \
+|| defined(__ARM_ARCH_5TE__) \
+|| defined(__ARM_ARCH_5TEJ__)
+#define BTHUMB_ARCH_VERSION 2
+
+#elif defined(__ARM_ARCH_6J__) \
+|| defined(__ARM_ARCH_6K__) \
+|| defined(__ARM_ARCH_6Z__) \
+|| defined(__ARM_ARCH_6ZK__) \
+|| defined(__ARM_ARCH_6M__)
+#define BTHUMB_ARCH_VERSION 3
+
+#elif defined(__ARM_ARCH_6T2__) \
+|| defined(__ARM_ARCH_7__) \
+|| defined(__ARM_ARCH_7A__) \
+|| defined(__ARM_ARCH_7K__) \
+|| defined(__ARM_ARCH_7M__) \
+|| defined(__ARM_ARCH_7R__) \
+|| defined(__ARM_ARCH_7S__)
+#define BTHUMB_ARCH_VERSION 4
+
+/* RVCT sets __TARGET_ARCH_THUMB */
+#elif defined(__TARGET_ARCH_THUMB)
+#define BTHUMB_ARCH_VERSION __TARGET_ARCH_THUMB
+
+#else
+#define BTHUMB_ARCH_VERSION 0
+#endif
+
+/* BCPU(ARM_TRADITIONAL) - Thumb2 is not available, only traditional ARM (v4 or greater) */
+/* BCPU(ARM_THUMB2) - Thumb2 instruction set is available */
+/* Only one of these will be defined. */
+#if !defined(BCPU_ARM_TRADITIONAL) && !defined(BCPU_ARM_THUMB2)
+# if defined(thumb2) || defined(__thumb2__) \
+|| ((defined(__thumb) || defined(__thumb__)) && BTHUMB_ARCH_VERSION == 4)
+# define BCPU_ARM_TRADITIONAL 0
+# define BCPU_ARM_THUMB2 1
+# elif BARM_ARCH_AT_LEAST(4)
+# define BCPU_ARM_TRADITIONAL 1
+# define BCPU_ARM_THUMB2 0
+# else
+# error "Not supported ARM architecture"
+# endif
+#elif BCPU(ARM_TRADITIONAL) && BCPU(ARM_THUMB2) /* Sanity Check */
+# error "Cannot use both of BCPU_ARM_TRADITIONAL and BCPU_ARM_THUMB2 platforms"
+#endif /* !defined(BCPU_ARM_TRADITIONAL) && !defined(BCPU_ARM_THUMB2) */
+
+#endif /* ARM */
+
+#define BATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
+
+#if (BPLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200) || (BPLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
+#define BUSE_OS_LOG 1
+#endif
diff --git a/Source/bmalloc/bmalloc/BumpAllocator.h b/Source/bmalloc/bmalloc/BumpAllocator.h
new file mode 100644
index 000000000..6f7091736
--- /dev/null
+++ b/Source/bmalloc/bmalloc/BumpAllocator.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BumpAllocator_h
+#define BumpAllocator_h
+
+#include "BAssert.h"
+#include "BumpRange.h"
+#include "ObjectType.h"
+
+namespace bmalloc {
+
+// Helper object for allocating small objects.
+
class BumpAllocator {
public:
    BumpAllocator();

    // Configures this allocator for a single object size class; drops any
    // previously held range.
    void init(size_t);

    // Object size (bytes) this allocator hands out.
    size_t size() { return m_size; }

    // True until the first refill() (or after clear()).
    bool isNull() { return !m_ptr; }
    void clear();

    // True while objects remain in the current bump range.
    bool canAllocate() { return !!m_remaining; }
    void* allocate();

    // Adopts a fresh range to bump through; only legal when exhausted.
    void refill(const BumpRange&);

private:
    char* m_ptr; // next free object; advanced by m_size on each allocate()
    unsigned m_size; // object size class, in bytes
    unsigned m_remaining; // objects left in the current range
};
+
+inline BumpAllocator::BumpAllocator()
+ : m_ptr()
+ , m_size()
+ , m_remaining()
+{
+}
+
inline void BumpAllocator::init(size_t size)
{
    m_ptr = nullptr;
    m_size = size; // narrowing size_t -> unsigned; small size classes fit -- TODO confirm upper bound
    m_remaining = 0;
}
+
+inline void* BumpAllocator::allocate()
+{
+ BASSERT(m_remaining);
+
+ --m_remaining;
+ char* result = m_ptr;
+ m_ptr += m_size;
+ return result;
+}
+
inline void BumpAllocator::refill(const BumpRange& bumpRange)
{
    // Refilling is only legal once the previous range is fully consumed, and
    // the new range must be non-empty.
    BASSERT(!canAllocate());
    m_ptr = bumpRange.begin;
    m_remaining = bumpRange.objectCount;
    BASSERT(canAllocate());
}
+
+inline void BumpAllocator::clear()
+{
+ m_ptr = nullptr;
+ m_remaining = 0;
+}
+
+} // namespace bmalloc
+
+#endif // BumpAllocator_h
diff --git a/Source/bmalloc/bmalloc/BumpRange.h b/Source/bmalloc/bmalloc/BumpRange.h
new file mode 100644
index 000000000..5fa090912
--- /dev/null
+++ b/Source/bmalloc/bmalloc/BumpRange.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BumpRange_h
+#define BumpRange_h
+
+#include "FixedVector.h"
+#include "Range.h"
+#include "Sizes.h"
+
+namespace bmalloc {
+
+struct BumpRange {
+ char* begin;
+ unsigned short objectCount;
+};
+
+typedef FixedVector<BumpRange, bumpRangeCacheCapacity> BumpRangeCache;
+
+} // namespace bmalloc
+
+#endif // BumpRange_h
diff --git a/Source/bmalloc/bmalloc/Cache.cpp b/Source/bmalloc/bmalloc/Cache.cpp
new file mode 100644
index 000000000..2fc703669
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Cache.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Cache.h"
+#include "Heap.h"
+#include "Inline.h"
+#include "PerProcess.h"
+
+namespace bmalloc {
+
// Caches are allocated straight from the VM so that creating a thread's
// cache never recurses into the allocator the cache implements.
void* Cache::operator new(size_t size)
{
    return vmAllocate(vmSize(size));
}

void Cache::operator delete(void* p, size_t size)
{
    vmDeallocate(p, vmSize(size));
}
+
+void Cache::scavenge()
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return;
+
+ cache->allocator().scavenge();
+ cache->deallocator().scavenge();
+}
+
Cache::Cache()
    : m_deallocator(PerProcess<Heap>::get())
    , m_allocator(PerProcess<Heap>::get(), m_deallocator)
{
    // m_deallocator is declared before m_allocator in Cache.h, so it is fully
    // constructed by the time the allocator captures a reference to it.
}
+
// These slow paths run the first time a thread touches the heap:
// PerThread<Cache>::getSlowCase() constructs the thread's Cache, and the
// request is then satisfied through it. NO_INLINE keeps this cold path out
// of the inlined fast paths in Cache.h.
NO_INLINE void* Cache::tryAllocateSlowCaseNullCache(size_t size)
{
    return PerThread<Cache>::getSlowCase()->allocator().tryAllocate(size);
}

NO_INLINE void* Cache::allocateSlowCaseNullCache(size_t size)
{
    return PerThread<Cache>::getSlowCase()->allocator().allocate(size);
}

NO_INLINE void* Cache::allocateSlowCaseNullCache(size_t alignment, size_t size)
{
    return PerThread<Cache>::getSlowCase()->allocator().allocate(alignment, size);
}

NO_INLINE void Cache::deallocateSlowCaseNullCache(void* object)
{
    PerThread<Cache>::getSlowCase()->deallocator().deallocate(object);
}

NO_INLINE void* Cache::reallocateSlowCaseNullCache(void* object, size_t newSize)
{
    return PerThread<Cache>::getSlowCase()->allocator().reallocate(object, newSize);
}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Cache.h b/Source/bmalloc/bmalloc/Cache.h
new file mode 100644
index 000000000..6c28b4a33
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Cache.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Cache_h
+#define Cache_h
+
+#include "Allocator.h"
+#include "Deallocator.h"
+#include "PerThread.h"
+
+namespace bmalloc {
+
+// Per-thread allocation / deallocation cache, backed by a per-process Heap.
+
class Cache {
public:
    // Caches are VM-allocated (see Cache.cpp) so constructing a thread's
    // cache never recurses into the allocator it implements.
    void* operator new(size_t);
    void operator delete(void*, size_t);

    // Static entry points for the malloc-style API. Each dispatches to the
    // calling thread's cache, falling back to a slow path that creates the
    // cache on the thread's first use.
    static void* tryAllocate(size_t);
    static void* allocate(size_t);
    static void* tryAllocate(size_t alignment, size_t);
    static void* allocate(size_t alignment, size_t);
    static void deallocate(void*);
    static void* reallocate(void*, size_t);

    // Returns this thread's cached memory to the heap.
    static void scavenge();

    Cache();

    Allocator& allocator() { return m_allocator; }
    Deallocator& deallocator() { return m_deallocator; }

private:
    static void* tryAllocateSlowCaseNullCache(size_t);
    static void* allocateSlowCaseNullCache(size_t);
    static void* allocateSlowCaseNullCache(size_t alignment, size_t);
    static void deallocateSlowCaseNullCache(void*);
    static void* reallocateSlowCaseNullCache(void*, size_t);

    // Declaration order matters: m_allocator's constructor takes a reference
    // to m_deallocator, so the deallocator must be constructed first.
    Deallocator m_deallocator;
    Allocator m_allocator;
};
+
+inline void* Cache::tryAllocate(size_t size)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return tryAllocateSlowCaseNullCache(size);
+ return cache->allocator().tryAllocate(size);
+}
+
+inline void* Cache::allocate(size_t size)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return allocateSlowCaseNullCache(size);
+ return cache->allocator().allocate(size);
+}
+
+inline void* Cache::tryAllocate(size_t alignment, size_t size)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return allocateSlowCaseNullCache(alignment, size);
+ return cache->allocator().tryAllocate(alignment, size);
+}
+
+inline void* Cache::allocate(size_t alignment, size_t size)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return allocateSlowCaseNullCache(alignment, size);
+ return cache->allocator().allocate(alignment, size);
+}
+
+inline void Cache::deallocate(void* object)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return deallocateSlowCaseNullCache(object);
+ return cache->deallocator().deallocate(object);
+}
+
+inline void* Cache::reallocate(void* object, size_t newSize)
+{
+ Cache* cache = PerThread<Cache>::getFastCase();
+ if (!cache)
+ return reallocateSlowCaseNullCache(object, newSize);
+ return cache->allocator().reallocate(object, newSize);
+}
+
+} // namespace bmalloc
+
+#endif // Cache_h
diff --git a/Source/bmalloc/bmalloc/Chunk.h b/Source/bmalloc/bmalloc/Chunk.h
new file mode 100644
index 000000000..bfc30d210
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Chunk.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Chunk_h
+#define Chunk_h
+
+#include "Object.h"
+#include "Sizes.h"
+#include "SmallLine.h"
+#include "SmallPage.h"
+#include "VMAllocate.h"
+#include <array>
+
+namespace bmalloc {
+
// An aligned region of VM carved into small lines and pages. The per-line and
// per-page metadata arrays are stored at the start of the chunk itself, so
// metadata and payload are located by simple pointer arithmetic.
class Chunk {
public:
    // Recovers the owning chunk from any interior pointer (alignment mask).
    static Chunk* get(void*);

    // The lock_guard parameter documents that callers construct chunks with
    // the heap lock held; construction itself does no work.
    Chunk(std::lock_guard<StaticMutex>&);

    // Byte offset of an interior pointer within this chunk.
    size_t offset(void*);

    char* address(size_t offset); // offset -> raw pointer
    SmallPage* page(size_t offset); // offset -> head-page metadata
    SmallLine* line(size_t offset); // offset -> line metadata

    char* bytes() { return reinterpret_cast<char*>(this); }
    SmallLine* lines() { return &m_lines[0]; }
    SmallPage* pages() { return &m_pages[0]; }

private:
    // One metadata entry per line / per page in the chunk.
    std::array<SmallLine, chunkSize / smallLineSize> m_lines;
    std::array<SmallPage, chunkSize / smallPageSize> m_pages;
};
+
struct ChunkHash {
    static unsigned hash(Chunk* key)
    {
        // Chunks are chunkSize-aligned (see Chunk::get), so dividing by
        // chunkSize yields the chunk's index in the address space.
        return static_cast<unsigned>(
            reinterpret_cast<uintptr_t>(key) / chunkSize);
    }
};
+
inline Chunk::Chunk(std::lock_guard<StaticMutex>&)
{
    // Nothing to initialize here; the lock_guard parameter only documents
    // that the heap lock is held during construction.
}

inline Chunk* Chunk::get(void* address)
{
    // Mask off the low bits of any interior pointer to find the chunk base.
    return static_cast<Chunk*>(mask(address, chunkMask));
}

inline size_t Chunk::offset(void* address)
{
    // The pointer must lie inside this chunk.
    BASSERT(address >= this);
    BASSERT(address < bytes() + chunkSize);
    return static_cast<char*>(address) - bytes();
}

inline char* Chunk::address(size_t offset)
{
    return bytes() + offset;
}
+
inline SmallPage* Chunk::page(size_t offset)
{
    size_t pageNumber = offset / smallPageSize;
    SmallPage* page = &m_pages[pageNumber];
    // Subtracting slide() canonicalizes to the head page -- presumably
    // interior pages of a multi-page span store their distance back to the
    // head; TODO confirm against SmallPage/Heap.
    return page - page->slide();
}

inline SmallLine* Chunk::line(size_t offset)
{
    size_t lineNumber = offset / smallLineSize;
    return &m_lines[lineNumber];
}
+
+inline char* SmallLine::begin()
+{
+ Chunk* chunk = Chunk::get(this);
+ size_t lineNumber = this - chunk->lines();
+ size_t offset = lineNumber * smallLineSize;
+ return &reinterpret_cast<char*>(chunk)[offset];
+}
+
+inline char* SmallLine::end()
+{
+ return begin() + smallLineSize;
+}
+
inline SmallLine* SmallPage::begin()
{
    // Only valid on a head page (slide == 0); interior pages must be
    // canonicalized via Chunk::page() first.
    BASSERT(!m_slide);
    Chunk* chunk = Chunk::get(this);
    size_t pageNumber = this - chunk->pages();
    // Each page owns smallPageLineCount consecutive line-metadata entries.
    size_t lineNumber = pageNumber * smallPageLineCount;
    return &chunk->lines()[lineNumber];
}
+
// An Object is a (chunk, offset) pair describing a small allocation; the
// accessors below translate it back into raw address / line / page views.
inline Object::Object(void* object)
    : m_chunk(Chunk::get(object))
    , m_offset(m_chunk->offset(object))
{
}

inline Object::Object(Chunk* chunk, void* object)
    : m_chunk(chunk)
    , m_offset(m_chunk->offset(object))
{
    // Caller already knows the chunk; just verify it matches the pointer.
    BASSERT(chunk == Chunk::get(object));
}

inline char* Object::address()
{
    return m_chunk->address(m_offset);
}

inline SmallLine* Object::line()
{
    return m_chunk->line(m_offset);
}

inline SmallPage* Object::page()
{
    return m_chunk->page(m_offset);
}
+
+}; // namespace bmalloc
+
#endif // Chunk_h
diff --git a/Source/bmalloc/bmalloc/Deallocator.cpp b/Source/bmalloc/bmalloc/Deallocator.cpp
new file mode 100644
index 000000000..56bb4d6a9
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Deallocator.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "BAssert.h"
+#include "Chunk.h"
+#include "Deallocator.h"
+#include "DebugHeap.h"
+#include "Heap.h"
+#include "Inline.h"
+#include "Object.h"
+#include "PerProcess.h"
+#include <algorithm>
+#include <cstdlib>
+#include <sys/mman.h>
+
+using namespace std;
+
+namespace bmalloc {
+
Deallocator::Deallocator(Heap* heap)
    : m_debugHeap(heap->debugHeap())
{
    if (m_debugHeap) {
        // Fill the object log in order to disable the fast path.
        // (deallocateFastCase bails out when the log is full, so every free
        // is forced through deallocateSlowCase and on to the debug heap.)
        while (m_objectLog.size() != m_objectLog.capacity())
            m_objectLog.push(nullptr);
    }
}

Deallocator::~Deallocator()
{
    // Flush logged objects back to the heap before this thread's cache dies.
    scavenge();
}
+
void Deallocator::scavenge()
{
    // Under the debug heap, frees already happened eagerly in
    // deallocateSlowCase(); the log only holds the constructor's null padding.
    if (m_debugHeap)
        return;

    processObjectLog();
}

// Pre-locked variant: caller already holds the heap mutex.
void Deallocator::processObjectLog(std::lock_guard<StaticMutex>& lock)
{
    Heap* heap = PerProcess<Heap>::getFastCase();

    // Each logged void* converts to an Object (chunk + offset) on the fly.
    for (Object object : m_objectLog)
        heap->derefSmallLine(lock, object);

    m_objectLog.clear();
}

void Deallocator::processObjectLog()
{
    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
    processObjectLog(lock);
}
+
void Deallocator::deallocateSlowCase(void* object)
{
    // Debug heap handles everything, including null.
    if (m_debugHeap)
        return m_debugHeap->free(object);

    // Null is deliberately routed here by deallocateFastCase() -- it asserts
    // mightBeLarge(nullptr) -- so the fast path stays branch-light.
    if (!object)
        return;

    // Large objects are returned to the heap immediately, under the lock.
    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
    if (PerProcess<Heap>::getFastCase()->isLarge(lock, object)) {
        PerProcess<Heap>::getFastCase()->deallocateLarge(lock, object);
        return;
    }

    // Small object: make room in the log if needed (we already hold the
    // lock, so flush directly), then defer the line deref to the next flush.
    if (m_objectLog.size() == m_objectLog.capacity())
        processObjectLog(lock);

    m_objectLog.push(object);
}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Deallocator.h b/Source/bmalloc/bmalloc/Deallocator.h
new file mode 100644
index 000000000..00b116d3b
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Deallocator.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Deallocator_h
+#define Deallocator_h
+
+#include "FixedVector.h"
+#include <mutex>
+
+namespace bmalloc {
+
+class DebugHeap;
+class Heap;
+class StaticMutex;
+
+// Per-cache object deallocator.
+
class Deallocator {
public:
    Deallocator(Heap*);
    ~Deallocator();

    void deallocate(void*);
    // Flushes the object log back to the heap.
    void scavenge();

    // Drains the log, dereferencing each logged small object's line; the
    // lock_guard overload is for callers already holding the heap mutex.
    void processObjectLog();
    void processObjectLog(std::lock_guard<StaticMutex>&);

private:
    bool deallocateFastCase(void*);
    void deallocateSlowCase(void*);

    // deallocatorLogCapacity is declared in Sizes.h -- presumably reaching
    // this header transitively; TODO confirm include chain (IWYU).
    FixedVector<void*, deallocatorLogCapacity> m_objectLog;
    DebugHeap* m_debugHeap; // non-null => route every free to the debug heap
};
+
inline bool Deallocator::deallocateFastCase(void* object)
{
    // mightBeLarge(nullptr) must hold: it funnels null into the slow case,
    // which performs the actual null check.
    BASSERT(mightBeLarge(nullptr));
    if (mightBeLarge(object))
        return false;

    // A full log also forces the slow case. The debug-heap constructor
    // pre-fills the log precisely to exploit this.
    if (m_objectLog.size() == m_objectLog.capacity())
        return false;

    m_objectLog.push(object);
    return true;
}
+
+inline void Deallocator::deallocate(void* object)
+{
+ if (!deallocateFastCase(object))
+ deallocateSlowCase(object);
+}
+
+} // namespace bmalloc
+
+#endif // Deallocator_h
diff --git a/Source/bmalloc/bmalloc/DebugHeap.cpp b/Source/bmalloc/bmalloc/DebugHeap.cpp
new file mode 100644
index 000000000..6a0f07cf1
--- /dev/null
+++ b/Source/bmalloc/bmalloc/DebugHeap.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "DebugHeap.h"
+#include "BAssert.h"
+#include "BPlatform.h"
+#include <cstdlib>
+#include <thread>
+
+namespace bmalloc {
+
+#if BOS(DARWIN)
+
// Darwin: forward everything to a dedicated, named malloc zone so these
// allocations are identifiable in the system heap tools. malloc/realloc
// crash on failure; memalign lets the caller choose via crashOnFailure.
DebugHeap::DebugHeap(std::lock_guard<StaticMutex>&)
    : m_zone(malloc_create_zone(0, 0))
{
    malloc_set_zone_name(m_zone, "WebKit Using System Malloc");
}

void* DebugHeap::malloc(size_t size)
{
    void* result = malloc_zone_malloc(m_zone, size);
    if (!result)
        BCRASH();
    return result;
}

void* DebugHeap::memalign(size_t alignment, size_t size, bool crashOnFailure)
{
    void* result = malloc_zone_memalign(m_zone, alignment, size);
    if (!result && crashOnFailure)
        BCRASH();
    return result;
}

void* DebugHeap::realloc(void* object, size_t size)
{
    void* result = malloc_zone_realloc(m_zone, object, size);
    if (!result)
        BCRASH();
    return result;
}

void DebugHeap::free(void* object)
{
    malloc_zone_free(m_zone, object);
}
+
+#else
+
// Generic platforms: forward to the C library allocator.
DebugHeap::DebugHeap(std::lock_guard<StaticMutex>&)
{
}

void* DebugHeap::malloc(size_t size)
{
    void* result = ::malloc(size);
    if (!result)
        BCRASH();
    return result;
}

void* DebugHeap::memalign(size_t alignment, size_t size, bool crashOnFailure)
{
    // posix_memalign requires alignment to be a power of two and a multiple
    // of sizeof(void*) -- presumably guaranteed by callers; TODO confirm.
    void* result;
    if (posix_memalign(&result, alignment, size)) {
        if (crashOnFailure)
            BCRASH();
        return nullptr;
    }
    return result;
}

void* DebugHeap::realloc(void* object, size_t size)
{
    void* result = ::realloc(object, size);
    // NOTE(review): ::realloc(p, 0) may legitimately return null on some
    // platforms, which would BCRASH here -- verify callers never pass 0.
    if (!result)
        BCRASH();
    return result;
}

void DebugHeap::free(void* object)
{
    ::free(object);
}
+
+#endif
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/DebugHeap.h b/Source/bmalloc/bmalloc/DebugHeap.h
new file mode 100644
index 000000000..cda96d076
--- /dev/null
+++ b/Source/bmalloc/bmalloc/DebugHeap.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "StaticMutex.h"
+#include <mutex>
+#if BOS(DARWIN)
+#include <malloc/malloc.h>
+#endif
+
+namespace bmalloc {
+
// Pass-through heap used when malloc debugging is active: every request is
// forwarded to the system allocator (a dedicated malloc zone on Darwin)
// instead of bmalloc's own pages. See DebugHeap.cpp.
class DebugHeap {
public:
    // The lock_guard parameter documents that construction happens with the
    // heap lock held -- TODO confirm at the PerProcess call site.
    DebugHeap(std::lock_guard<StaticMutex>&);

    void* malloc(size_t);
    // Returns null on failure when !crashOnFailure; otherwise crashes.
    void* memalign(size_t alignment, size_t, bool crashOnFailure);
    void* realloc(void*, size_t);
    void free(void*);

private:
#if BOS(DARWIN)
    // NOTE(review): BOS() comes from BPlatform.h, which this header does not
    // include directly -- presumably via StaticMutex.h; verify (IWYU).
    malloc_zone_t* m_zone;
#endif
};
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Environment.cpp b/Source/bmalloc/bmalloc/Environment.cpp
new file mode 100644
index 000000000..19e047f2c
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Environment.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "BPlatform.h"
+#include "Environment.h"
+#include <cstdlib>
+#include <cstring>
+#if BOS(DARWIN)
+#include <mach-o/dyld.h>
+#elif BOS(UNIX)
+#include <dlfcn.h>
+#endif
+
+namespace bmalloc {
+
// Returns true if any Darwin malloc-debugging environment variable is set,
// meaning the user wants the system allocator's debugging facilities and
// bmalloc should stand aside (see Environment::computeIsDebugHeapEnabled()).
static bool isMallocEnvironmentVariableSet()
{
    static const char* const list[] = {
        "Malloc",
        "MallocLogFile",
        "MallocGuardEdges",
        "MallocDoNotProtectPrelude",
        "MallocDoNotProtectPostlude",
        "MallocStackLogging",
        "MallocStackLoggingNoCompact",
        "MallocStackLoggingDirectory",
        "MallocScribble",
        "MallocCheckHeapStart",
        "MallocCheckHeapEach",
        "MallocCheckHeapSleep",
        "MallocCheckHeapAbort",
        "MallocErrorAbort",
        "MallocCorruptionAbort",
        "MallocHelp"
    };

    // Range-for replaces the original's manual sizeof/index bookkeeping.
    for (const char* name : list) {
        if (getenv(name))
            return true;
    }

    return false;
}
+
// libgmalloc is injected via DYLD_INSERT_LIBRARIES; treat its presence in
// that variable's value as "enabled".
static bool isLibgmallocEnabled()
{
    const char* inserted = getenv("DYLD_INSERT_LIBRARIES");
    return inserted && strstr(inserted, "libgmalloc");
}
+
static bool isSanitizerEnabled()
{
#if BOS(DARWIN)
    // Scan loaded images for a clang runtime whose name starts with
    // asan_/tsan_ (e.g. ".../libclang_rt.asan_osx_dynamic.dylib").
    static const char sanitizerPrefix[] = "/libclang_rt.";
    static const char asanName[] = "asan_";
    static const char tsanName[] = "tsan_";
    uint32_t imageCount = _dyld_image_count();
    for (uint32_t i = 0; i < imageCount; ++i) {
        const char* imageName = _dyld_get_image_name(i);
        if (!imageName)
            continue;
        if (const char* s = strstr(imageName, sanitizerPrefix)) {
            // sizeof counts the trailing NUL, hence the "- 1" adjustments.
            const char* sanitizerName = s + sizeof(sanitizerPrefix) - 1;
            if (!strncmp(sanitizerName, asanName, sizeof(asanName) - 1))
                return true;
            if (!strncmp(sanitizerName, tsanName, sizeof(tsanName) - 1))
                return true;
        }
    }
    return false;
#elif BOS(UNIX)
    // ASan/TSan runtimes export these init symbols; probe our own image.
    void* handle = dlopen(nullptr, RTLD_NOW);
    if (!handle)
        return false;
    bool result = !!dlsym(handle, "__asan_init") || !!dlsym(handle, "__tsan_init");
    dlclose(handle);
    return result;
#else
    return false;
#endif
}
+
Environment::Environment()
    : m_isDebugHeapEnabled(computeIsDebugHeapEnabled())
{
    // Computed once at construction; the environment is not re-checked later.
}
+
+bool Environment::computeIsDebugHeapEnabled()
+{
+ if (isMallocEnvironmentVariableSet())
+ return true;
+ if (isLibgmallocEnabled())
+ return true;
+ if (isSanitizerEnabled())
+ return true;
+ return false;
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Environment.h b/Source/bmalloc/bmalloc/Environment.h
new file mode 100644
index 000000000..3e46a4359
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Environment.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Environment_h
+#define Environment_h
+
+namespace bmalloc {
+
// Process-wide switch: when malloc debugging is detected in the environment
// (see Environment.cpp), bmalloc stands aside in favor of the DebugHeap.
class Environment {
public:
    Environment();

    // Computed once in the constructor; const-qualified since it only reads
    // cached state.
    bool isDebugHeapEnabled() const { return m_isDebugHeapEnabled; }

private:
    bool computeIsDebugHeapEnabled();

    bool m_isDebugHeapEnabled;
};
+
+} // namespace bmalloc
+
+#endif // Environment_h
diff --git a/Source/bmalloc/bmalloc/FixedVector.h b/Source/bmalloc/bmalloc/FixedVector.h
new file mode 100644
index 000000000..51e0a7201
--- /dev/null
+++ b/Source/bmalloc/bmalloc/FixedVector.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FixedVector_h
+#define FixedVector_h
+
+#include "BAssert.h"
+#include <array>
+#include <cstddef>
+#include <type_traits>
+
+namespace bmalloc {
+
+// A replacement for std::vector that uses a fixed-sized inline backing store.
+
+template<typename T, size_t Capacity>
+class FixedVector { // Vector-like container over a fixed inline std::array; never allocates.
+    static_assert(std::is_trivially_destructible<T>::value, "FixedVector must have a trivial destructor."); // pop()/shrink() never run destructors.
+public:
+    FixedVector(const FixedVector&) = delete; // Non-copyable: the inline buffer can be large.
+    FixedVector& operator=(const FixedVector&) = delete;
+
+    FixedVector();
+
+    const T* begin() const { return &m_buffer[0]; }
+    const T* end() const { return begin() + size(); }
+
+    size_t size() const { return m_size; } // Live element count; always <= Capacity.
+    size_t capacity() const { return Capacity; } // Fixed at compile time.
+
+    T& operator[](size_t);
+
+    void push(const T&);
+    void push(const T*, const T*); // Appends the half-open range [begin, end).
+    T pop(); // Removes and returns the last element.
+
+    void shrink(T*); // Shrinks so the given pointer becomes end().
+    void shrink(size_t); // Shrinks to the given element count.
+
+    void clear() { shrink(static_cast<size_t>(0)); }
+    bool isEmpty() { return !m_size; }
+
+private:
+    size_t m_size;
+    std::array<T, Capacity> m_buffer; // Elements at index >= m_size are default-constructed and unused.
+};
+
+template<typename T, size_t Capacity>
+inline FixedVector<T, Capacity>::FixedVector()
+    : m_size(0) // Buffer elements are default-constructed; only [0, m_size) is meaningful.
+{
+}
+
+template<typename T, size_t Capacity>
+inline T& FixedVector<T, Capacity>::operator[](size_t i)
+{
+    BASSERT(i < m_size); // Debug-only bounds check.
+    return m_buffer[i];
+}
+
+template<typename T, size_t Capacity>
+inline void FixedVector<T, Capacity>::push(const T& value)
+{
+    BASSERT(m_size < Capacity); // Capacity is fixed; overflowing it is a programming error.
+    m_buffer[m_size++] = value;
+}
+
+template<typename T, size_t Capacity>
+inline void FixedVector<T, Capacity>::push(const T* begin, const T* end)
+{
+    for (const T* it = begin; it != end; ++it) // Append [begin, end) one element at a time.
+        push(*it);
+}
+
+template<typename T, size_t Capacity>
+inline T FixedVector<T, Capacity>::pop()
+{
+    BASSERT(m_size); // Popping an empty vector is a programming error.
+    return m_buffer[--m_size]; // Returned by value; the slot is simply reused later (T is trivially destructible).
+}
+
+template<typename T, size_t Capacity>
+inline void FixedVector<T, Capacity>::shrink(size_t size)
+{
+    BASSERT(size <= m_size); // shrink() may only reduce the size.
+    m_size = size;
+}
+
+template<typename T, size_t Capacity>
+inline void FixedVector<T, Capacity>::shrink(T* end)
+{
+    shrink(end - begin()); // Pointer form: new size is the distance from begin() to 'end'.
+}
+
+} // namespace bmalloc
+
+#endif // FixedVector_h
diff --git a/Source/bmalloc/bmalloc/Heap.cpp b/Source/bmalloc/bmalloc/Heap.cpp
new file mode 100644
index 000000000..d353bd2f1
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Heap.cpp
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Heap.h"
+#include "BumpAllocator.h"
+#include "Chunk.h"
+#include "DebugHeap.h"
+#include "PerProcess.h"
+#include "SmallLine.h"
+#include "SmallPage.h"
+#include <thread>
+
+namespace bmalloc {
+
+Heap::Heap(std::lock_guard<StaticMutex>&) // The guard parameter proves the caller holds the PerProcess<Heap> lock.
+    : m_vmPageSizePhysical(vmPageSizePhysical()) // Cached: queried once, used throughout metadata setup and allocation.
+    , m_scavenger(*this, &Heap::concurrentScavenge)
+    , m_debugHeap(nullptr)
+{
+    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize); // Page bookkeeping assumes whole small pages per VM page.
+    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());
+
+    initializeLineMetadata();
+    initializePageMetadata();
+
+    if (m_environment.isDebugHeapEnabled())
+        m_debugHeap = PerProcess<DebugHeap>::get(); // Non-null m_debugHeap diverts allocations to the debug heap (see DebugHeap).
+}
+
+void Heap::initializeLineMetadata()
+{
+    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize); // Only classes smaller than a line use per-line metadata.
+    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+    m_smallLineMetadata.grow(sizeClassCount * smallLineCount); // One LineMetadata per (sizeClass, line) pair.
+
+    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
+        size_t size = objectSize(sizeClass);
+        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount]; // This class's row of per-line entries.
+
+        size_t object = 0; // Byte offset of the next object to place within the page.
+        size_t line = 0;
+        while (object < m_vmPageSizePhysical) {
+            line = object / smallLineSize;
+            size_t leftover = object % smallLineSize; // Leading bytes of this line still owned by the previous object.
+
+            size_t objectCount;
+            size_t remainder;
+            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder); // How many objects begin in this line.
+
+            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };
+
+            object += objectCount * size; // Jump to the first object of the next line with objects.
+        }
+
+        // Don't allow the last object in a page to escape the page.
+        if (object > m_vmPageSizePhysical) {
+            BASSERT(pageMetadata[line].objectCount);
+            --pageMetadata[line].objectCount; // Drop the overhanging object from the final line's count.
+        }
+    }
+}
+
+void Heap::initializePageMetadata()
+{
+    auto computePageSize = [&](size_t sizeClass) { // Smallest page size with acceptable end-of-page waste for this class.
+        size_t size = objectSize(sizeClass);
+        if (sizeClass < bmalloc::sizeClass(smallLineSize)) // Sub-line classes always use one physical VM page.
+            return m_vmPageSizePhysical;
+
+        for (size_t pageSize = m_vmPageSizePhysical;
+            pageSize < pageSizeMax;
+            pageSize += m_vmPageSizePhysical) {
+            RELEASE_BASSERT(pageSize <= chunkSize / 2);
+            size_t waste = pageSize % size; // Trailing bytes that cannot hold a whole object.
+            if (waste <= pageSize / pageSizeWasteFactor) // Accept the first size that wastes little enough.
+                return pageSize;
+        }
+
+        return pageSizeMax; // No candidate was waste-free enough; cap at the maximum page size.
+    };
+
+    for (size_t i = 0; i < sizeClassCount; ++i)
+        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize; // Page class = page size in small pages, minus one.
+}
+
+void Heap::concurrentScavenge() // Background-thread entry point invoked by m_scavenger.
+{
+#if BOS(DARWIN)
+    pthread_set_qos_class_self_np(m_requestedScavengerThreadQOSClass, 0); // Honor the QoS requested via setScavengerThreadQOSClass().
+#endif
+
+    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex()); // Same lock that guards every Heap entry point.
+
+    scavenge(lock, Async);
+}
+
+void Heap::scavenge(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
+{
+    m_isAllocatingPages.fill(false); // Cleared here; allocation paths set these flags to veto an in-flight scavenge.
+    m_isAllocatingLargePages = false;
+
+    if (scavengeMode == Async)
+        sleep(lock, scavengeSleepDuration); // Back off so bursts of alloc/free don't thrash commit/decommit.
+
+    scavengeSmallPages(lock, scavengeMode);
+    scavengeLargeObjects(lock, scavengeMode);
+}
+
+void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
+{
+    for (size_t pageClass = 0; pageClass < pageClassCount; pageClass++) {
+        auto& smallPages = m_smallPages[pageClass]; // Fully-free pages eligible for return to the VM heap.
+
+        while (!smallPages.isEmpty()) {
+            if (m_isAllocatingPages[pageClass]) {
+                m_scavenger.run(); // Allocation resumed in this class: stop fighting it and reschedule.
+                break;
+            }
+
+            SmallPage* page = smallPages.pop();
+            m_vmHeap.deallocateSmallPage(lock, pageClass, page, scavengeMode); // Hand the page back to the VM heap.
+        }
+    }
+}
+
+void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
+{
+    auto& ranges = m_largeFree.ranges();
+    for (size_t i = ranges.size(); i-- > 0; i = std::min(i, ranges.size())) { // Walk backwards; clamp i because we pop and push below.
+        if (m_isAllocatingLargePages) {
+            m_scavenger.run(); // A large allocation is in flight: defer to it and reschedule.
+            break;
+        }
+
+        auto range = ranges.pop(i);
+
+        if (scavengeMode == Async)
+            lock.unlock(); // Decommit is a syscall; don't hold the heap lock across it when running asynchronously.
+        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
+        if (scavengeMode == Async)
+            lock.lock();
+
+        range.setPhysicalSize(0); // Address space stays reserved; it just has no committed pages anymore.
+        ranges.push(range);
+    }
+}
+
+SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
+{
+    if (!m_smallPagesWithFreeLines[sizeClass].isEmpty())
+        return m_smallPagesWithFreeLines[sizeClass].popFront(); // Best case: a partially-used page of this exact size class.
+
+    SmallPage* page = [&]() {
+        size_t pageClass = m_pageClasses[sizeClass];
+        if (!m_smallPages[pageClass].isEmpty())
+            return m_smallPages[pageClass].pop(); // Next best: reuse a fully-free page of the right page class.
+
+        m_isAllocatingPages[pageClass] = true; // Veto a concurrent scavenge of this page class.
+
+        SmallPage* page = m_vmHeap.allocateSmallPage(lock, pageClass); // Last resort: fresh page from the VM heap.
+        m_objectTypes.set(Chunk::get(page), ObjectType::Small); // Record chunk type for isLarge()/deallocation dispatch.
+        return page;
+    }();
+
+    page->setSizeClass(sizeClass); // A reused page may have served a different size class before.
+    return page;
+}
+
+void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object)
+{
+    BASSERT(!object.line()->refCount(lock)); // Caller (derefSmallLine) just dropped the line's last reference.
+    SmallPage* page = object.page();
+    page->deref(lock);
+
+    if (!page->hasFreeLines(lock)) {
+        page->setHasFreeLines(lock, true);
+        m_smallPagesWithFreeLines[page->sizeClass()].push(page); // First free line: the page is allocatable again.
+    }
+
+    if (page->refCount(lock))
+        return; // Page still has live lines; nothing more to do.
+
+    size_t sizeClass = page->sizeClass();
+    size_t pageClass = m_pageClasses[sizeClass];
+
+    m_smallPagesWithFreeLines[sizeClass].remove(page); // Page is entirely free now: off the per-size-class list...
+    m_smallPages[pageClass].push(page); // ...and onto the fully-free per-page-class list.
+
+    m_scavenger.run(); // Schedule asynchronous return of free memory to the OS.
+}
+
+void Heap::allocateSmallBumpRangesByMetadata(
+    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
+    BumpAllocator& allocator, BumpRangeCache& rangeCache) // For objects smaller than a line: uses precomputed per-line layout.
+{
+    SmallPage* page = allocateSmallPage(lock, sizeClass);
+    SmallLine* lines = page->begin();
+    BASSERT(page->hasFreeLines(lock));
+    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount]; // Built by initializeLineMetadata().
+
+    auto findSmallBumpRange = [&](size_t& lineNumber) { // Advance to the next free line that holds at least one object.
+        for ( ; lineNumber < smallLineCount; ++lineNumber) {
+            if (!lines[lineNumber].refCount(lock)) {
+                if (pageMetadata[lineNumber].objectCount)
+                    return true;
+            }
+        }
+        return false;
+    };
+
+    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange { // Claim a maximal run of free lines as one bump range.
+        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset; // Skip bytes owned by the previous line's last object.
+        unsigned short objectCount = 0;
+
+        for ( ; lineNumber < smallLineCount; ++lineNumber) {
+            if (lines[lineNumber].refCount(lock))
+                break;
+
+            if (!pageMetadata[lineNumber].objectCount)
+                continue; // Line fully covered by an object that began earlier.
+
+            objectCount += pageMetadata[lineNumber].objectCount;
+            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount); // Pre-reference every object in the line.
+            page->ref(lock);
+        }
+        return { begin, objectCount };
+    };
+
+    size_t lineNumber = 0;
+    for (;;) {
+        if (!findSmallBumpRange(lineNumber)) {
+            page->setHasFreeLines(lock, false); // Page exhausted; allocateSmallPage already removed it from the free-lines list.
+            BASSERT(allocator.canAllocate());
+            return;
+        }
+
+        // In a fragmented page, some free ranges might not fit in the cache.
+        if (rangeCache.size() == rangeCache.capacity()) {
+            m_smallPagesWithFreeLines[sizeClass].push(page); // Keep the page discoverable for its remaining ranges.
+            BASSERT(allocator.canAllocate());
+            return;
+        }
+
+        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
+        if (allocator.canAllocate())
+            rangeCache.push(bumpRange); // Allocator already primed: stash the range for later refills.
+        else
+            allocator.refill(bumpRange); // First range primes the allocator itself.
+    }
+}
+
+void Heap::allocateSmallBumpRangesByObject(
+    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
+    BumpAllocator& allocator, BumpRangeCache& rangeCache) // For objects spanning multiple lines: walks object-by-object.
+{
+    size_t size = allocator.size(); // Object size for this size class.
+    SmallPage* page = allocateSmallPage(lock, sizeClass);
+    BASSERT(page->hasFreeLines(lock));
+
+    auto findSmallBumpRange = [&](Object& it, Object& end) { // Advance to the next object whose first line is free.
+        for ( ; it + size <= end; it = it + size) {
+            if (!it.line()->refCount(lock))
+                return true;
+        }
+        return false;
+    };
+
+    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange { // Claim consecutive free objects as one bump range.
+        char* begin = it.address();
+        unsigned short objectCount = 0;
+        for ( ; it + size <= end; it = it + size) {
+            if (it.line()->refCount(lock))
+                break;
+
+            ++objectCount;
+            it.line()->ref(lock); // Each object is accounted to the line it begins in.
+            it.page()->ref(lock);
+        }
+        return { begin, objectCount };
+    };
+
+    Object it(page->begin()->begin());
+    Object end(it + pageSize(m_pageClasses[sizeClass])); // Walk the whole (possibly multi-small-page) page.
+    for (;;) {
+        if (!findSmallBumpRange(it, end)) {
+            page->setHasFreeLines(lock, false); // Page exhausted; allocateSmallPage already removed it from the free-lines list.
+            BASSERT(allocator.canAllocate());
+            return;
+        }
+
+        // In a fragmented page, some free ranges might not fit in the cache.
+        if (rangeCache.size() == rangeCache.capacity()) {
+            m_smallPagesWithFreeLines[sizeClass].push(page); // Keep the page discoverable for its remaining ranges.
+            BASSERT(allocator.canAllocate());
+            return;
+        }
+
+        BumpRange bumpRange = allocateSmallBumpRange(it, end);
+        if (allocator.canAllocate())
+            rangeCache.push(bumpRange); // Allocator already primed: stash the range for later refills.
+        else
+            allocator.refill(bumpRange); // First range primes the allocator itself.
+    }
+}
+
+LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size) // Carves an aligned allocation out of a free range.
+{
+    LargeRange prev;
+    LargeRange next;
+
+    size_t alignmentMask = alignment - 1;
+    if (test(range.begin(), alignmentMask)) { // Trim the misaligned prefix back to the free list.
+        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
+        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
+        prev = pair.first;
+        range = pair.second;
+    }
+
+    if (range.size() - size > size / pageSizeWasteFactor) { // Split off the tail only when the excess is worth keeping.
+        std::pair<LargeRange, LargeRange> pair = range.split(size);
+        range = pair.first;
+        next = pair.second;
+    }
+
+    if (range.physicalSize() < range.size()) {
+        m_isAllocatingLargePages = true; // Veto a concurrent large-object scavenge.
+
+        vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize()); // Commit the uncommitted suffix.
+        range.setPhysicalSize(range.size());
+    }
+    
+    if (prev)
+        m_largeFree.add(prev); // Return the trimmed prefix to the free list.
+
+    if (next)
+        m_largeFree.add(next); // Return the trimmed suffix to the free list.
+
+    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large); // Mark the chunk so deallocation dispatches to the large path.
+
+    m_largeAllocated.set(range.begin(), range.size()); // Remember the size for largeSize()/deallocateLarge().
+    return range;
+}
+
+void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size) // Returns null on failure.
+{
+    BASSERT(isPowerOfTwo(alignment));
+
+    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment; // Zero-byte requests still get a minimal allocation.
+    if (roundedSize < size) // Check for overflow
+        return nullptr;
+    size = roundedSize;
+
+    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
+    if (roundedAlignment < alignment) // Check for overflow
+        return nullptr;
+    alignment = roundedAlignment;
+
+    LargeRange range = m_largeFree.remove(alignment, size);
+    if (!range) {
+        range = m_vmHeap.tryAllocateLargeChunk(lock, alignment, size); // Free list couldn't satisfy: reserve fresh VM.
+        if (!range)
+            return nullptr;
+
+        m_largeFree.add(range); // Add then re-remove so the new chunk coalesces with existing free ranges.
+        range = m_largeFree.remove(alignment, size);
+    }
+
+    return splitAndAllocate(range, alignment, size).begin();
+}
+
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size) // Infallible variant of tryAllocateLarge.
+{
+    void* result = tryAllocateLarge(lock, alignment, size);
+    RELEASE_BASSERT(result); // Crash on exhaustion rather than returning null.
+    return result;
+}
+
+bool Heap::isLarge(std::lock_guard<StaticMutex>&, void* object)
+{
+    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large; // Object type is tracked per chunk, set at allocation time.
+}
+
+size_t Heap::largeSize(std::lock_guard<StaticMutex>&, void* object)
+{
+    return m_largeAllocated.get(object); // Size recorded by splitAndAllocate(), keyed by base address.
+}
+
+void Heap::shrinkLarge(std::lock_guard<StaticMutex>&, const Range& object, size_t newSize) // Shrinks in place, freeing the tail.
+{
+    BASSERT(object.size() > newSize);
+
+    size_t size = m_largeAllocated.remove(object.begin());
+    LargeRange range = LargeRange(object, size); // A live allocation is fully committed, so physicalSize == size.
+    splitAndAllocate(range, alignment, newSize); // NOTE(review): 'alignment' is not a local — presumably the global minimum-alignment constant from Sizes.h; confirm.
+
+    m_scavenger.run(); // The freed tail may be decommitted later.
+}
+
+void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object)
+{
+    size_t size = m_largeAllocated.remove(object);
+    m_largeFree.add(LargeRange(object, size, size)); // Still fully committed, hence physicalSize == size; add() coalesces neighbors.
+
+    m_scavenger.run(); // Let the scavenger decommit it if allocation stays idle.
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Heap.h b/Source/bmalloc/bmalloc/Heap.h
new file mode 100644
index 000000000..63616a5af
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Heap.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Heap_h
+#define Heap_h
+
+#include "AsyncTask.h"
+#include "BumpRange.h"
+#include "Environment.h"
+#include "LargeMap.h"
+#include "LineMetadata.h"
+#include "List.h"
+#include "Map.h"
+#include "Mutex.h"
+#include "Object.h"
+#include "SmallLine.h"
+#include "SmallPage.h"
+#include "VMHeap.h"
+#include "Vector.h"
+#include <array>
+#include <mutex>
+
+namespace bmalloc {
+
+class BeginTag;
+class BumpAllocator;
+class DebugHeap;
+class EndTag;
+
+class Heap { // Central per-process allocator state; every public entry point requires the PerProcess<Heap> lock.
+public:
+    Heap(std::lock_guard<StaticMutex>&); // Constructed lazily under PerProcess<Heap>; the guard proves the lock is held.
+
+    DebugHeap* debugHeap() { return m_debugHeap; } // Non-null when the environment enables the debug heap.
+
+    void allocateSmallBumpRanges(std::lock_guard<StaticMutex>&, size_t sizeClass, BumpAllocator&, BumpRangeCache&);
+    void derefSmallLine(std::lock_guard<StaticMutex>&, Object);
+
+    void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t); // Crashes on failure.
+    void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t); // Returns null on failure.
+    void deallocateLarge(std::lock_guard<StaticMutex>&, void*);
+
+    bool isLarge(std::lock_guard<StaticMutex>&, void*);
+    size_t largeSize(std::lock_guard<StaticMutex>&, void*);
+    void shrinkLarge(std::lock_guard<StaticMutex>&, const Range&, size_t);
+
+    void scavenge(std::unique_lock<StaticMutex>&, ScavengeMode); // Returns free memory to the OS; Async drops the lock around syscalls.
+
+#if BOS(DARWIN)
+    void setScavengerThreadQOSClass(qos_class_t overrideClass) { m_requestedScavengerThreadQOSClass = overrideClass; } // Applied by concurrentScavenge().
+#endif
+
+private:
+    struct LargeObjectHash {
+        static unsigned hash(void* key)
+        {
+            return static_cast<unsigned>(
+                reinterpret_cast<uintptr_t>(key) / smallMax); // Divide out low bits — large allocations are coarsely aligned, so this spreads keys.
+        }
+    };
+
+    ~Heap() = delete; // Lives for the whole process; never destroyed.
+
+    void initializeLineMetadata();
+    void initializePageMetadata();
+
+    void allocateSmallBumpRangesByMetadata(std::lock_guard<StaticMutex>&,
+        size_t sizeClass, BumpAllocator&, BumpRangeCache&);
+    void allocateSmallBumpRangesByObject(std::lock_guard<StaticMutex>&,
+        size_t sizeClass, BumpAllocator&, BumpRangeCache&);
+
+    SmallPage* allocateSmallPage(std::lock_guard<StaticMutex>&, size_t sizeClass);
+
+    void deallocateSmallLine(std::lock_guard<StaticMutex>&, Object);
+
+    void mergeLarge(BeginTag*&, EndTag*&, Range&); // NOTE(review): no definition in Heap.cpp in this patch — possibly stale declarations.
+    void mergeLargeLeft(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
+    void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
+
+    LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t);
+
+    void concurrentScavenge(); // Entry point for m_scavenger's background thread.
+    void scavengeSmallPages(std::unique_lock<StaticMutex>&, ScavengeMode);
+    void scavengeLargeObjects(std::unique_lock<StaticMutex>&, ScavengeMode);
+
+    size_t m_vmPageSizePhysical; // Cached at construction; sizes all line/page metadata.
+    Vector<LineMetadata> m_smallLineMetadata; // Indexed by sizeClass * smallLineCount + lineNumber.
+    std::array<size_t, sizeClassCount> m_pageClasses; // Size class -> page class (page size in small pages, minus one).
+
+    std::array<List<SmallPage>, sizeClassCount> m_smallPagesWithFreeLines; // Partially-used pages, per size class.
+    std::array<List<SmallPage>, pageClassCount> m_smallPages; // Fully-free pages awaiting reuse or scavenging, per page class.
+
+    Map<void*, size_t, LargeObjectHash> m_largeAllocated; // Live large allocations: base address -> size.
+    LargeMap m_largeFree; // Coalesced free large ranges.
+
+    Map<Chunk*, ObjectType, ChunkHash> m_objectTypes; // Small vs. Large per chunk, for deallocation dispatch.
+
+    std::array<bool, pageClassCount> m_isAllocatingPages; // Set by allocation, cleared by scavenge(); vetoes in-flight scavenging.
+    bool m_isAllocatingLargePages; // Same veto for the large-object path.
+
+    AsyncTask<Heap, decltype(&Heap::concurrentScavenge)> m_scavenger; // Background task returning free memory to the OS.
+
+    Environment m_environment;
+    DebugHeap* m_debugHeap; // Null unless the debug heap is enabled.
+
+    VMHeap m_vmHeap; // Source of fresh chunks/pages from the OS.
+
+#if BOS(DARWIN)
+    qos_class_t m_requestedScavengerThreadQOSClass { QOS_CLASS_UNSPECIFIED };
+#endif
+};
+
+inline void Heap::allocateSmallBumpRanges(
+    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
+    BumpAllocator& allocator, BumpRangeCache& rangeCache)
+{
+    if (sizeClass < bmalloc::sizeClass(smallLineSize)) // Objects smaller than a line use the precomputed per-line metadata...
+        return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache);
+    return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache); // ...larger objects are walked object-by-object.
+}
+
+inline void Heap::derefSmallLine(std::lock_guard<StaticMutex>& lock, Object object)
+{
+    if (!object.line()->deref(lock)) // deref() reports whether this was the line's last reference.
+        return;
+    deallocateSmallLine(lock, object); // Last object in the line freed: release the whole line.
+}
+
+} // namespace bmalloc
+
+#endif // Heap_h
diff --git a/Source/bmalloc/bmalloc/Inline.h b/Source/bmalloc/bmalloc/Inline.h
new file mode 100644
index 000000000..c005417a6
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Inline.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Inline_h
+#define Inline_h
+
+#define INLINE __attribute__((always_inline)) inline // Force inlining (GCC/Clang attribute).
+
+#define NO_INLINE __attribute__((noinline)) // Forbid inlining, e.g. to keep a cold path out of its caller.
+
+#endif // Inline_h
diff --git a/Source/bmalloc/bmalloc/LargeMap.cpp b/Source/bmalloc/bmalloc/LargeMap.cpp
new file mode 100644
index 000000000..443ad9125
--- /dev/null
+++ b/Source/bmalloc/bmalloc/LargeMap.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "LargeMap.h"
+#include <utility>
+
+namespace bmalloc {
+
+LargeRange LargeMap::remove(size_t alignment, size_t size) // Removes a free range that can satisfy (alignment, size).
+{
+    size_t alignmentMask = alignment - 1; // alignment is a power of two (asserted in Heap::tryAllocateLarge).
+
+    LargeRange* candidate = m_free.end();
+    for (LargeRange* it = m_free.begin(); it != m_free.end(); ++it) { // Linear scan; m_free is unordered.
+        if (it->size() < size)
+            continue;
+
+        if (candidate != m_free.end() && candidate->begin() < it->begin())
+            continue; // Prefer the lowest-addressed fit to limit address-space fragmentation.
+
+        if (test(it->begin(), alignmentMask)) { // Misaligned start: verify the range still fits after aligning up.
+            char* aligned = roundUpToMultipleOf(alignment, it->begin());
+            if (aligned < it->begin()) // Check for overflow.
+                continue;
+
+            char* alignedEnd = aligned + size;
+            if (alignedEnd < aligned) // Check for overflow.
+                continue;
+
+            if (alignedEnd > it->end()) // Not enough room once aligned.
+                continue;
+        }
+
+        candidate = it;
+    }
+    
+    if (candidate == m_free.end())
+        return LargeRange(); // Empty range signals failure; caller falls back to the VM heap.
+
+    return m_free.pop(candidate);
+}
+
+void LargeMap::add(const LargeRange& range) // Inserts a free range, eagerly coalescing with all adjacent ranges.
+{
+    LargeRange merged = range;
+
+    for (size_t i = 0; i < m_free.size(); ++i) {
+        if (!canMerge(merged, m_free[i]))
+            continue;
+
+        merged = merge(merged, m_free.pop(i--)); // i-- re-tests this slot — assumes pop(i) fills the hole with another element; confirm Vector::pop semantics.
+    }
+
+    m_free.push(merged);
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/LargeMap.h b/Source/bmalloc/bmalloc/LargeMap.h
new file mode 100644
index 000000000..2ad94b0eb
--- /dev/null
+++ b/Source/bmalloc/bmalloc/LargeMap.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LargeMap_h
+#define LargeMap_h
+
+#include "LargeRange.h"
+#include "Vector.h"
+#include <algorithm>
+
+namespace bmalloc {
+
+class LargeMap { // Free list of large ranges, kept eagerly coalesced by add().
+public:
+    void add(const LargeRange&); // Inserts a range, merging it with any adjacent free ranges.
+    LargeRange remove(size_t alignment, size_t); // Removes a satisfying range, or a null range on failure.
+    Vector<LargeRange>& ranges() { return m_free; } // Direct access, used by Heap::scavengeLargeObjects.
+
+private:
+    Vector<LargeRange> m_free; // Unordered; lookup is a linear scan.
+};
+
+} // namespace bmalloc
+
+#endif // LargeMap_h
diff --git a/Source/bmalloc/bmalloc/LargeRange.h b/Source/bmalloc/bmalloc/LargeRange.h
new file mode 100644
index 000000000..935aec7dd
--- /dev/null
+++ b/Source/bmalloc/bmalloc/LargeRange.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LargeRange_h
+#define LargeRange_h
+
+#include "BAssert.h"
+#include "Range.h"
+
+namespace bmalloc {
+
+class LargeRange : public Range { // A Range that also tracks how much of it is committed ("physical") memory.
+public:
+    LargeRange()
+        : Range() // Null range; tests false via Range's conversion.
+        , m_physicalSize(0)
+    {
+    }
+
+    LargeRange(const Range& other, size_t physicalSize)
+        : Range(other)
+        , m_physicalSize(physicalSize)
+    {
+    }
+
+    LargeRange(void* begin, size_t size, size_t physicalSize)
+        : Range(begin, size)
+        , m_physicalSize(physicalSize)
+    {
+    }
+
+    size_t physicalSize() const { return m_physicalSize; } // Committed bytes, counted as a prefix from begin().
+    void setPhysicalSize(size_t physicalSize) { m_physicalSize = physicalSize; }
+
+    std::pair<LargeRange, LargeRange> split(size_t) const; // Splits at a byte offset, apportioning physicalSize to the prefix first.
+
+    bool operator<(const void* other) const { return begin() < other; }
+    bool operator<(const LargeRange& other) const { return begin() < other.begin(); } // Ordered by address.
+
+private:
+    size_t m_physicalSize;
+};
+
+inline bool canMerge(const LargeRange& a, const LargeRange& b) // True when the two ranges are contiguous in address space.
+{
+    if (a.end() == b.begin()) // Adjacent, a first.
+        return true;
+
+    if (b.end() == a.begin()) // Adjacent, b first.
+        return true;
+
+    return false;
+}
+
+inline LargeRange merge(const LargeRange& a, const LargeRange& b) // Precondition: canMerge(a, b).
+{
+    const LargeRange& left = std::min(a, b); // operator< orders by address, so this is the lower-addressed range.
+    if (left.size() == left.physicalSize()) { // Left fully committed: the committed prefixes concatenate exactly.
+        return LargeRange(
+            left.begin(),
+            a.size() + b.size(),
+            a.physicalSize() + b.physicalSize());
+    }
+
+    return LargeRange(
+        left.begin(),
+        a.size() + b.size(),
+        left.physicalSize()); // Only left's committed prefix is contiguous from begin(); right's is conservatively dropped.
+}
+
+inline std::pair<LargeRange, LargeRange> LargeRange::split(size_t size) const
+{
+    BASSERT(size <= this->size()); // Split point must lie within the range.
+
+    if (size <= physicalSize()) { // Split point falls inside the committed prefix.
+        LargeRange left(begin(), size, size); // Left is fully committed...
+        LargeRange right(left.end(), this->size() - size, physicalSize() - size); // ...right keeps the remaining committed bytes.
+        return std::make_pair(left, right);
+    }
+
+    LargeRange left(begin(), size, physicalSize()); // Split point past the committed prefix: left keeps it all...
+    LargeRange right(left.end(), this->size() - size, 0); // ...right is entirely uncommitted.
+    return std::make_pair(left, right);
+}
+
+} // namespace bmalloc
+
+#endif // LargeRange_h
diff --git a/Source/bmalloc/bmalloc/LineMetadata.h b/Source/bmalloc/bmalloc/LineMetadata.h
new file mode 100644
index 000000000..80b338111
--- /dev/null
+++ b/Source/bmalloc/bmalloc/LineMetadata.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LineMetadata_h
+#define LineMetadata_h
+
+namespace bmalloc {
+
+// Per-small-line layout metadata: where the first object begins within the
+// line and how many objects the line holds. Packed into two bytes because a
+// table of these exists for every (size class, line) combination.
+struct LineMetadata {
+ unsigned char startOffset;
+ unsigned char objectCount;
+};
+
+// smallLineSize and alignment come from Sizes.h; <limits> is assumed to be
+// provided by the includer. Both asserts guard the unsigned char packing above.
+static_assert(
+ smallLineSize - alignment <= std::numeric_limits<unsigned char>::max(),
+ "maximum object offset must fit in LineMetadata::startOffset");
+
+static_assert(
+ smallLineSize / alignment <= std::numeric_limits<unsigned char>::max(),
+ "maximum object count must fit in LineMetadata::objectCount");
+
+} // namespace bmalloc
+
+#endif // LineMetadata_h
diff --git a/Source/bmalloc/bmalloc/List.h b/Source/bmalloc/bmalloc/List.h
new file mode 100644
index 000000000..406bb0f2e
--- /dev/null
+++ b/Source/bmalloc/bmalloc/List.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef List_h
+#define List_h
+
+namespace bmalloc {
+
+// Intrusive doubly-linked list node. A node is born self-linked (prev and
+// next point at itself), which is also the "not on any list" state that
+// List::remove() restores.
+template<typename T>
+struct ListNode {
+ ListNode()
+ : prev(this)
+ , next(this)
+ {
+ }
+
+ ListNode<T>* prev;
+ ListNode<T>* next;
+};
+
+// Intrusive circular doubly-linked list with a sentinel root node. T must
+// derive from ListNode<T>; the list never allocates or owns its elements.
+template<typename T>
+class List {
+ static_assert(std::is_trivially_destructible<T>::value, "List must have a trivial destructor.");
+public:
+ // Empty when the sentinel links only to itself.
+ bool isEmpty() { return m_root.next == &m_root; }
+
+ // NOTE: on an empty list these return the sentinel cast to T*; callers
+ // must check isEmpty() first.
+ T* head() { return static_cast<T*>(m_root.next); }
+ T* tail() { return static_cast<T*>(m_root.prev); }
+
+ // Appends at the tail.
+ void push(T* node)
+ {
+ ListNode<T>* it = tail();
+ insertAfter(it, node);
+ }
+
+ // Removes and returns the tail (LIFO with respect to push).
+ T* pop()
+ {
+ ListNode<T>* result = tail();
+ remove(result);
+ return static_cast<T*>(result);
+ }
+
+ // Removes and returns the head (FIFO with respect to push).
+ T* popFront()
+ {
+ ListNode<T>* result = head();
+ remove(result);
+ return static_cast<T*>(result);
+ }
+
+ // Standard splice-in after `it`.
+ void insertAfter(ListNode<T>* it, ListNode<T>* node)
+ {
+ ListNode<T>* prev = it;
+ ListNode<T>* next = it->next;
+
+ node->next = next;
+ next->prev = node;
+
+ node->prev = prev;
+ prev->next = node;
+ }
+
+ // Unlinks `node` and restores its self-linked (off-list) state so a
+ // subsequent remove of the same node is harmless.
+ void remove(ListNode<T>* node)
+ {
+ ListNode<T>* next = node->next;
+ ListNode<T>* prev = node->prev;
+
+ next->prev = prev;
+ prev->next = next;
+
+ node->prev = node;
+ node->next = node;
+ }
+
+private:
+ ListNode<T> m_root;
+};
+
+} // namespace bmalloc
+
+#endif // List_h
diff --git a/Source/bmalloc/bmalloc/Logging.cpp b/Source/bmalloc/bmalloc/Logging.cpp
new file mode 100644
index 000000000..e1fb60cb8
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Logging.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Logging.h"
+#include "BPlatform.h"
+
+#if !BUSE(OS_LOG)
+#include <stdarg.h>
+#include <stdio.h>
+#endif
+
+#if BPLATFORM(IOS)
+#include <mach/exception_types.h>
+#include <objc/objc.h>
+#include <unistd.h>
+
+#include "BSoftLinking.h"
+BSOFT_LINK_PRIVATE_FRAMEWORK(CrashReporterSupport);
+BSOFT_LINK_FUNCTION(CrashReporterSupport, SimulateCrash, BOOL, (pid_t pid, mach_exception_data_type_t exceptionCode, id description), (pid, exceptionCode, description));
+#endif
+
+namespace bmalloc {
+
+// Records a VM allocation failure. On iOS this files a simulated-crash report
+// (via the soft-linked CrashReporterSupport SimulateCrash) without killing the
+// process; on other platforms it is a no-op.
+void logVMFailure()
+{
+#if BPLATFORM(IOS)
+ // Magic exception code identifying this report; reads as "c10s call"-style
+ // hex tag. Presumably recognized by crash-report tooling -- TODO confirm.
+ const mach_exception_data_type_t kExceptionCode = 0xc105ca11;
+ SimulateCrash(getpid(), kExceptionCode, nullptr);
+#endif
+}
+
+#if !BUSE(OS_LOG)
+// Fallback assertion reporter for platforms without os_log: prints the
+// formatted message, then the file/line/function location, to stderr.
+void reportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ fprintf(stderr, "%s(%d) : %s\n", file, line, function);
+}
+#endif
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Logging.h b/Source/bmalloc/bmalloc/Logging.h
new file mode 100644
index 000000000..d92067bc0
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Logging.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BPlatform.h"
+
+namespace bmalloc {
+
+void logVMFailure();
+
+#if !BUSE(OS_LOG)
+void reportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* format, ...) BATTRIBUTE_PRINTF(4, 5);
+#endif
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Map.h b/Source/bmalloc/bmalloc/Map.h
new file mode 100644
index 000000000..1124a8c5d
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Map.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Map_h
+#define Map_h
+
+#include "Inline.h"
+#include "Sizes.h"
+#include "Vector.h"
+
+namespace bmalloc {
+
+class SmallPage;
+
+// Minimal open-addressed hash table with linear probing, sized as a power of
+// two (indexing uses `h & m_tableMask`). A default-constructed Key() marks an
+// empty bucket, so keys that compare equal to Key() cannot be stored.
+// Deliberately allocation-light: values and keys must be trivially
+// destructible, and there is no iteration or tombstone support.
+template<typename Key, typename Value, typename Hash> class Map {
+ static_assert(std::is_trivially_destructible<Key>::value, "Map must have a trivial destructor.");
+ static_assert(std::is_trivially_destructible<Value>::value, "Map must have a trivial destructor.");
+public:
+ struct Bucket {
+ Key key;
+ Value value;
+ };
+
+ size_t size() { return m_keyCount; }
+ size_t capacity() { return m_table.size(); }
+
+ // key must be in the map. (If it is not, the probe loop in find() never
+ // terminates.)
+ Value& get(const Key& key)
+ {
+ auto& bucket = find(key, [&](const Bucket& bucket) { return bucket.key == key; });
+ return bucket.value;
+ }
+
+ // Inserts or overwrites. Growing first keeps at least one empty bucket,
+ // which guarantees the probe loop terminates.
+ void set(const Key& key, const Value& value)
+ {
+ if (shouldGrow())
+ rehash();
+
+ auto& bucket = find(key, [&](const Bucket& bucket) { return !bucket.key || bucket.key == key; });
+ if (!bucket.key) {
+ bucket.key = key;
+ ++m_keyCount;
+ }
+ bucket.value = value;
+ }
+
+ // key must be in the map. Shrinks (by rehashing) before removing; clearing
+ // the key without tombstones is safe only because rehash/linear probing
+ // here relies on the load factor staying low.
+ Value remove(const Key& key)
+ {
+ if (shouldShrink())
+ rehash();
+
+ auto& bucket = find(key, [&](const Bucket& bucket) { return bucket.key == key; });
+ Value value = bucket.value;
+ bucket.key = Key();
+ --m_keyCount;
+ return value;
+ }
+
+private:
+ static const unsigned minCapacity = 16;
+ // Load-factor denominators: grow at 1/2 full, rebuild to 1/4 full, shrink
+ // below 1/8 full.
+ static const unsigned maxLoad = 2;
+ static const unsigned rehashLoad = 4;
+ static const unsigned minLoad = 8;
+
+ bool shouldGrow() { return m_keyCount * maxLoad >= capacity(); }
+ bool shouldShrink() { return m_keyCount * minLoad <= capacity() && capacity() > minCapacity; }
+
+ void rehash();
+
+ // Linear probe from the key's hash until `predicate` accepts a bucket.
+ // Callers must guarantee a matching bucket exists (see get/set/remove).
+ template<typename Predicate>
+ Bucket& find(const Key& key, const Predicate& predicate)
+ {
+ for (unsigned h = Hash::hash(key); ; ++h) {
+ unsigned i = h & m_tableMask;
+
+ Bucket& bucket = m_table[i];
+ if (predicate(bucket))
+ return bucket;
+ }
+ }
+
+ unsigned m_keyCount;
+ unsigned m_tableMask;
+ Vector<Bucket> m_table;
+};
+
+// Rebuilds the table at rehashLoad (4x) headroom over the current key count,
+// reinserting every live bucket. Serves both growth and shrinkage.
+template<typename Key, typename Value, typename Hash>
+void Map<Key, Value, Hash>::rehash()
+{
+ auto oldTable = std::move(m_table);
+
+ // newCapacity stays a power of two assuming m_keyCount * rehashLoad
+ // rounds into one -- NOTE(review): this relies on Vector::resize semantics
+ // and the load constants; verify capacity is always a power of two, since
+ // m_tableMask arithmetic requires it.
+ size_t newCapacity = std::max(minCapacity, m_keyCount * rehashLoad);
+ m_table.resize(newCapacity);
+
+ m_keyCount = 0;
+ m_tableMask = newCapacity - 1;
+
+ for (auto& bucket : oldTable) {
+ if (!bucket.key)
+ continue;
+
+ // Reinsertion must never trigger a nested rehash.
+ BASSERT(!shouldGrow());
+ set(bucket.key, bucket.value);
+ }
+}
+
+template<typename Key, typename Value, typename Hash> const unsigned Map<Key, Value, Hash>::minCapacity;
+
+} // namespace bmalloc
+
+#endif // Map_h
diff --git a/Source/bmalloc/bmalloc/Mutex.h b/Source/bmalloc/bmalloc/Mutex.h
new file mode 100644
index 000000000..f0e59982b
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Mutex.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Mutex_h
+#define Mutex_h
+
+#include "StaticMutex.h"
+
+// A fast replacement for std::mutex, for use in standard storage.
+
+namespace bmalloc {
+
+// StaticMutex relies on zero-filled static storage for its initial state;
+// Mutex makes it usable in ordinary (heap/stack/member) storage by running
+// that initialization explicitly in a constructor.
+class Mutex : public StaticMutex {
+public:
+ Mutex();
+};
+
+inline Mutex::Mutex()
+{
+ // StaticMutex requires explicit initialization when used in non-static storage.
+ init();
+}
+
+} // namespace bmalloc
+
+#endif // Mutex_h
diff --git a/Source/bmalloc/bmalloc/Object.h b/Source/bmalloc/bmalloc/Object.h
new file mode 100644
index 000000000..70a4751fe
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Object.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Object_h
+#define Object_h
+
+#include <cstddef>
+
+namespace bmalloc {
+
+class Chunk;
+class SmallLine;
+class SmallPage;
+
+// Lightweight handle identifying a small object as a (Chunk*, byte offset)
+// pair. The pointer-taking constructors and address()/line()/page() are
+// defined elsewhere (they need Chunk's layout).
+// NOTE(review): this header uses BASSERT below but does not include
+// "BAssert.h" -- presumably supplied transitively; verify.
+class Object {
+public:
+ Object(void*);
+ Object(Chunk*, void*);
+ Object(Chunk* chunk, size_t offset)
+ : m_chunk(chunk)
+ , m_offset(offset)
+ {
+ }
+
+ Chunk* chunk() { return m_chunk; }
+ size_t offset() { return m_offset; }
+ char* address();
+
+ SmallLine* line();
+ SmallPage* page();
+
+ // Offset arithmetic stays within the same chunk.
+ Object operator+(size_t);
+ Object operator-(size_t);
+ bool operator<=(const Object&);
+
+private:
+ Chunk* m_chunk;
+ size_t m_offset;
+};
+
+inline Object Object::operator+(size_t offset)
+{
+ return Object(m_chunk, m_offset + offset);
+}
+
+inline Object Object::operator-(size_t offset)
+{
+ return Object(m_chunk, m_offset - offset);
+}
+
+// Comparison is only meaningful between objects in the same chunk.
+inline bool Object::operator<=(const Object& other)
+{
+ BASSERT(m_chunk == other.m_chunk);
+ return m_offset <= other.m_offset;
+}
+
+}; // namespace bmalloc
+
+#endif // Object_h
diff --git a/Source/bmalloc/bmalloc/ObjectType.cpp b/Source/bmalloc/bmalloc/ObjectType.cpp
new file mode 100644
index 000000000..a8d339723
--- /dev/null
+++ b/Source/bmalloc/bmalloc/ObjectType.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ObjectType.h"
+
+#include "Chunk.h"
+#include "Heap.h"
+#include "Object.h"
+#include "PerProcess.h"
+
+namespace bmalloc {
+
+// Classifies a pointer as Small or Large. mightBeLarge() is a cheap
+// alignment-based filter; only pointers that pass it require consulting the
+// heap's authoritative large-object records under the heap lock.
+ObjectType objectType(void* object)
+{
+ if (mightBeLarge(object)) {
+ // nullptr satisfies the large-alignment test but is not a large
+ // object; treat it as Small without touching the heap.
+ if (!object)
+ return ObjectType::Small;
+
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ if (PerProcess<Heap>::getFastCase()->isLarge(lock, object))
+ return ObjectType::Large;
+ }
+
+ return ObjectType::Small;
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/ObjectType.h b/Source/bmalloc/bmalloc/ObjectType.h
new file mode 100644
index 000000000..2cc3ab0b6
--- /dev/null
+++ b/Source/bmalloc/bmalloc/ObjectType.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ObjectType_h
+#define ObjectType_h
+
+#include "BAssert.h"
+#include "Sizes.h"
+
+namespace bmalloc {
+
+enum class ObjectType : unsigned char { Small, Large };
+
+ObjectType objectType(void*);
+
+// Fast conservative filter: large allocations are aligned to largeAlignment,
+// so any pointer with bits set in largeAlignmentMask is definitely Small.
+// A pointer passing this test may still be Small (including nullptr) --
+// objectType() does the authoritative check. test() and largeAlignmentMask
+// come from Sizes.h.
+inline bool mightBeLarge(void* object)
+{
+ return !test(object, largeAlignmentMask);
+}
+
+} // namespace bmalloc
+
+#endif // ObjectType_h
diff --git a/Source/bmalloc/bmalloc/PerProcess.h b/Source/bmalloc/bmalloc/PerProcess.h
new file mode 100644
index 000000000..e0f801481
--- /dev/null
+++ b/Source/bmalloc/bmalloc/PerProcess.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PerProcess_h
+#define PerProcess_h
+
+#include "Inline.h"
+#include "Sizes.h"
+#include "StaticMutex.h"
+#include <mutex>
+
+namespace bmalloc {
+
+// Usage:
+// Object* object = PerProcess<Object>::get();
+// x = object->field->field;
+//
+// Object will be instantiated only once, even in the face of concurrency.
+//
+// NOTE: If you observe global side-effects of the Object constructor, be
+// sure to lock the Object mutex. For example:
+//
+// Object() : m_field(...) { globalFlag = true }
+//
+// Object* object = PerProcess<Object>::get();
+// x = object->m_field; // OK
+// if (gobalFlag) { ... } // Undefined behavior.
+//
+// std::lock_guard<StaticMutex> lock(PerProcess<Object>::mutex());
+// Object* object = PerProcess<Object>::get(lock);
+// if (gobalFlag) { ... } // OK.
+
+// Lazily-initialized process-wide singleton using double-checked locking.
+// The object is constructed in static aligned storage (no heap allocation --
+// this is an allocator, after all) and its constructor receives the held lock
+// so it can rely on mutual exclusion during setup.
+template<typename T>
+class PerProcess {
+public:
+ static T* get();
+ static T* getFastCase();
+
+ static StaticMutex& mutex() { return s_mutex; }
+
+private:
+ static T* getSlowCase();
+
+ static std::atomic<T*> s_object;
+ static StaticMutex s_mutex;
+
+ typedef typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type Memory;
+ static Memory s_memory;
+};
+
+// Lock-free fast path: consume ordering pairs with the release store in
+// getSlowCase, making the constructed object's writes visible.
+template<typename T>
+INLINE T* PerProcess<T>::getFastCase()
+{
+ return s_object.load(std::memory_order_consume);
+}
+
+template<typename T>
+INLINE T* PerProcess<T>::get()
+{
+ T* object = getFastCase();
+ if (!object)
+ return getSlowCase();
+ return object;
+}
+
+template<typename T>
+NO_INLINE T* PerProcess<T>::getSlowCase()
+{
+ std::lock_guard<StaticMutex> lock(s_mutex);
+ // Re-check under the lock: another thread may have constructed T while we
+ // were waiting.
+ if (!s_object.load(std::memory_order_consume)) {
+ // Placement-new into static storage; T's constructor takes the lock by
+ // reference (see class comment).
+ T* t = new (&s_memory) T(lock);
+ s_object.store(t, std::memory_order_release);
+ }
+ return s_object.load(std::memory_order_consume);
+}
+
+template<typename T>
+std::atomic<T*> PerProcess<T>::s_object;
+
+template<typename T>
+StaticMutex PerProcess<T>::s_mutex;
+
+template<typename T>
+typename PerProcess<T>::Memory PerProcess<T>::s_memory;
+
+} // namespace bmalloc
+
+#endif // PerProcess_h
diff --git a/Source/bmalloc/bmalloc/PerThread.h b/Source/bmalloc/bmalloc/PerThread.h
new file mode 100644
index 000000000..3fe156895
--- /dev/null
+++ b/Source/bmalloc/bmalloc/PerThread.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PerThread_h
+#define PerThread_h
+
+#include "BPlatform.h"
+#include "Inline.h"
+#include <mutex>
+#include <pthread.h>
+
+#if defined(__has_include)
+#if __has_include(<System/pthread_machdep.h>)
+#include <System/pthread_machdep.h>
+#define HAVE_PTHREAD_MACHDEP_H 1
+#else
+#define HAVE_PTHREAD_MACHDEP_H 0
+#endif
+#else
+#define HAVE_PTHREAD_MACHDEP_H 0
+#endif
+
+namespace bmalloc {
+
+// Usage:
+// Object* object = PerThread<Object>::get();
+
+// Lazily-constructed per-thread singleton. The instance is heap-allocated on
+// first access from each thread and destroyed via the pthread destructor when
+// the thread exits.
+template<typename T>
+class PerThread {
+public:
+ static T* get();
+ static T* getFastCase();
+ static T* getSlowCase();
+
+private:
+ static void destructor(void*);
+};
+
+#if HAVE_PTHREAD_MACHDEP_H
+
+class Cache;
+template<typename T> struct PerThreadStorage;
+
+// For now, we only support PerThread<Cache>. We can expand to other types by
+// using more keys.
+template<> struct PerThreadStorage<Cache> {
+ // Darwin reserves fixed TLS slots for system frameworks; using one gives a
+ // direct (no pthread_getspecific call) fast path.
+ static const pthread_key_t key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
+
+ static void* get()
+ {
+ return _pthread_getspecific_direct(key);
+ }
+
+ static void init(void* object, void (*destructor)(void*))
+ {
+ _pthread_setspecific_direct(key, object);
+ pthread_key_init_np(key, destructor);
+ }
+};
+
+#else
+
+// Portable fallback: one dynamically-created pthread key per type T.
+template<typename T> struct PerThreadStorage {
+ static bool s_didInitialize;
+ static pthread_key_t s_key;
+ static std::once_flag s_onceFlag;
+
+ static void* get()
+ {
+ // Before key creation no thread can have stored a value, so nullptr is
+ // the correct (and safe) answer.
+ if (!s_didInitialize)
+ return nullptr;
+ return pthread_getspecific(s_key);
+ }
+
+ static void init(void* object, void (*destructor)(void*))
+ {
+ // call_once makes key creation safe against concurrent first use.
+ std::call_once(s_onceFlag, [destructor]() {
+ int error = pthread_key_create(&s_key, destructor);
+ if (error)
+ BCRASH();
+ s_didInitialize = true;
+ });
+ pthread_setspecific(s_key, object);
+ }
+};
+
+template<typename T> bool PerThreadStorage<T>::s_didInitialize;
+template<typename T> pthread_key_t PerThreadStorage<T>::s_key;
+template<typename T> std::once_flag PerThreadStorage<T>::s_onceFlag;
+
+#endif
+
+template<typename T>
+INLINE T* PerThread<T>::getFastCase()
+{
+ return static_cast<T*>(PerThreadStorage<T>::get());
+}
+
+template<typename T>
+inline T* PerThread<T>::get()
+{
+ T* t = getFastCase();
+ if (!t)
+ return getSlowCase();
+ return t;
+}
+
+// Invoked by pthreads at thread exit with the thread's instance.
+template<typename T>
+void PerThread<T>::destructor(void* p)
+{
+ T* t = static_cast<T*>(p);
+ delete t;
+}
+
+// First access on this thread: allocate the instance and register it (and the
+// destructor) with thread-local storage.
+template<typename T>
+T* PerThread<T>::getSlowCase()
+{
+ BASSERT(!getFastCase());
+ T* t = new T;
+ PerThreadStorage<T>::init(t, destructor);
+ return t;
+}
+
+} // namespace bmalloc
+
+#endif // PerThread_h
diff --git a/Source/bmalloc/bmalloc/Range.h b/Source/bmalloc/bmalloc/Range.h
new file mode 100644
index 000000000..7d446f7ee
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Range.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Range_h
+#define Range_h
+
+#include <algorithm>
+#include <cstddef>
+
+namespace bmalloc {
+
+// A half-open byte range [begin(), end()) over raw memory, stored as a start
+// pointer plus size.
+class Range {
+public:
+ Range()
+ : m_begin(0)
+ , m_size(0)
+ {
+ }
+
+ Range(void* start, size_t size)
+ : m_begin(static_cast<char*>(start))
+ , m_size(size)
+ {
+ }
+
+ char* begin() const { return m_begin; }
+ char* end() const { return m_begin + m_size; }
+ size_t size() const { return m_size; }
+
+ // A range is "false" when empty, regardless of its start address.
+ bool operator!() const { return !m_size; }
+ explicit operator bool() const { return !!*this; }
+ // Order by start address.
+ bool operator<(const Range& other) const { return m_begin < other.m_begin; }
+
+private:
+ char* m_begin;
+ size_t m_size;
+};
+
+// Plain ranges can merge when exactly adjacent, in either order.
+inline bool canMerge(const Range& a, const Range& b)
+{
+ return a.begin() == b.end() || a.end() == b.begin();
+}
+
+// Callers must check canMerge() first; the result spans both inputs.
+inline Range merge(const Range& a, const Range& b)
+{
+ return Range(std::min(a.begin(), b.begin()), a.size() + b.size());
+}
+
+} // namespace bmalloc
+
+#endif // Range_h
diff --git a/Source/bmalloc/bmalloc/ScopeExit.h b/Source/bmalloc/bmalloc/ScopeExit.h
new file mode 100644
index 000000000..5b11c0c2a
--- /dev/null
+++ b/Source/bmalloc/bmalloc/ScopeExit.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <type_traits>
+#include <utility>
+
+namespace bmalloc {
+
// RAII helper: runs the stored function when this object goes out of scope.
// Use via makeScopeExit() so the functor type is deduced.
template<typename ExitFunction>
class ScopeExit {
public:
    // The parameter is an rvalue reference (ExitFunction is the class template
    // parameter, so this is not a forwarding reference); move it into the
    // member instead of copying, which also supports move-only functors.
    explicit ScopeExit(ExitFunction&& exitFunction)
        : m_exitFunction(std::move(exitFunction))
    {
    }

    ~ScopeExit()
    {
        m_exitFunction();
    }

private:
    ExitFunction m_exitFunction;
};
+
+template<typename ExitFunction>
+ScopeExit<ExitFunction> makeScopeExit(ExitFunction&& exitFunction)
+{
+ return ScopeExit<ExitFunction>(std::forward<ExitFunction>(exitFunction));
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Sizes.h b/Source/bmalloc/bmalloc/Sizes.h
new file mode 100644
index 000000000..6b1f394e3
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Sizes.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Sizes_h
+#define Sizes_h
+
+#include "Algorithm.h"
+#include "BPlatform.h"
+#include <algorithm>
+#include <cstdint>
+#include <cstddef>
+#include <limits>
+#include <type_traits>
+#include <chrono>
+
+namespace bmalloc {
+
+// Repository for malloc sizing constants and calculations.
+
namespace Sizes {
    static const size_t kB = 1024;
    static const size_t MB = kB * kB;

    // Minimum alignment of every small allocation.
    static const size_t alignment = 8;
    static const size_t alignmentMask = alignment - 1ul;

    // VM is carved from the OS in chunk-sized, chunk-aligned pieces;
    // chunkMask recovers a Chunk's base address from an interior pointer.
    static const size_t chunkSize = 2 * MB;
    static const size_t chunkMask = ~(chunkSize - 1ul);

    static const size_t smallLineSize = 256;
    static const size_t smallPageSize = 4 * kB;
    static const size_t smallPageLineCount = smallPageSize / smallLineSize;

    // Sizes up to maskSizeClassMax use the fast mask-based size classes;
    // sizes above that, up to smallMax, use the logarithmic size classes.
    static const size_t maskSizeClassMax = 512;
    static const size_t smallMax = 32 * kB;

    static const size_t pageSizeMax = smallMax * 2;
    static const size_t pageClassCount = pageSizeMax / smallPageSize;

    // Bounds internal fragmentation: per size class we waste at most
    // 1/wasteFactor of the object size.
    static const size_t pageSizeWasteFactor = 8;
    static const size_t logWasteFactor = 8;

    static const size_t largeAlignment = smallMax / pageSizeWasteFactor;
    static const size_t largeAlignmentMask = largeAlignment - 1;

    static const size_t deallocatorLogCapacity = 256;
    static const size_t bumpRangeCacheCapacity = 3;

    // How long the scavenger thread sleeps between passes.
    static const std::chrono::milliseconds scavengeSleepDuration = std::chrono::milliseconds(512);

    static const size_t maskSizeClassCount = maskSizeClassMax / alignment;

    // Maps a size in (0, maskSizeClassMax] to its mask-based size class index.
    inline constexpr size_t maskSizeClass(size_t size)
    {
        // We mask to accommodate zero.
        return mask((size - 1) / alignment, maskSizeClassCount - 1);
    }

    // Inverse of maskSizeClass: the (maximum) object size for a mask class.
    inline size_t maskObjectSize(size_t maskSizeClass)
    {
        return (maskSizeClass + 1) * alignment;
    }

    static const size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;

    static const size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;

    // Maps a size in (maskSizeClassMax, smallMax] to a logarithmic size class:
    // logWasteFactor classes per power-of-two "base", with alignment within a
    // base scaling up as the base grows (constant relative waste).
    inline size_t logSizeClass(size_t size)
    {
        size_t base = log2(size - 1) - log2(maskSizeClassMax);
        size_t offset = (size - 1 - (maskSizeClassMax << base));
        return base * logWasteFactor + offset / (logAlignmentMin << base);
    }

    // Inverse of logSizeClass: the (maximum) object size for a log class.
    inline size_t logObjectSize(size_t logSizeClass)
    {
        size_t base = logSizeClass / logWasteFactor;
        size_t offset = logSizeClass % logWasteFactor;
        return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
    }

    static const size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;

    // Unified size class index: mask classes first, then log classes.
    inline size_t sizeClass(size_t size)
    {
        if (size <= maskSizeClassMax)
            return maskSizeClass(size);
        return maskSizeClassCount + logSizeClass(size);
    }

    // Inverse of sizeClass.
    inline size_t objectSize(size_t sizeClass)
    {
        if (sizeClass < maskSizeClassCount)
            return maskObjectSize(sizeClass);
        return logObjectSize(sizeClass - maskSizeClassCount);
    }

    // Page classes are simple multiples of smallPageSize, 1-based.
    inline size_t pageSize(size_t pageClass)
    {
        return (pageClass + 1) * smallPageSize;
    }
}
+
+using namespace Sizes;
+
+} // namespace bmalloc
+
+#endif // Sizes_h
diff --git a/Source/bmalloc/bmalloc/SmallLine.h b/Source/bmalloc/bmalloc/SmallLine.h
new file mode 100644
index 000000000..188930edc
--- /dev/null
+++ b/Source/bmalloc/bmalloc/SmallLine.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SmallLine_h
+#define SmallLine_h
+
+#include "BAssert.h"
+#include "Mutex.h"
+#include "ObjectType.h"
+#include <mutex>
+
+namespace bmalloc {
+
// Per-line metadata for a 256-byte small-allocation line. The line's object
// count is set in one shot when objects are bump-allocated from it and
// decremented as objects are freed. All members are guarded by the heap lock,
// witnessed by the lock_guard parameters.
class SmallLine {
public:
    void ref(std::lock_guard<StaticMutex>&, unsigned char = 1);
    bool deref(std::lock_guard<StaticMutex>&);
    unsigned refCount(std::lock_guard<StaticMutex>&) { return m_refCount; }

    // Address range of the line's payload; defined elsewhere (address is
    // derived from this object's position in Chunk metadata).
    char* begin();
    char* end();

private:
    // Count of live objects in this line; 0 means the line is free.
    unsigned char m_refCount;

static_assert(
    smallLineSize / alignment <= std::numeric_limits<decltype(m_refCount)>::max(),
    "maximum object count must fit in SmallLine::m_refCount");

};
+
// Sets the line's object count in one shot (the line's objects are allocated
// as a batch). Asserts the line was previously free.
inline void SmallLine::ref(std::lock_guard<StaticMutex>&, unsigned char refCount)
{
    BASSERT(!m_refCount);
    m_refCount = refCount;
}
+
// Releases one object; returns true when the line becomes completely free.
inline bool SmallLine::deref(std::lock_guard<StaticMutex>&)
{
    BASSERT(m_refCount);
    --m_refCount;
    return !m_refCount;
}
+
+} // namespace bmalloc
+
+#endif // SmallLine_h
diff --git a/Source/bmalloc/bmalloc/SmallPage.h b/Source/bmalloc/bmalloc/SmallPage.h
new file mode 100644
index 000000000..26e874859
--- /dev/null
+++ b/Source/bmalloc/bmalloc/SmallPage.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SmallPage_h
+#define SmallPage_h
+
+#include "BAssert.h"
+#include "List.h"
+#include "Mutex.h"
+#include "VMAllocate.h"
+#include <mutex>
+
+namespace bmalloc {
+
// Per-page metadata for small allocations, packed into 4 bytes so the Chunk
// metadata stays compact. Linked into free lists via ListNode. Lock-guarded
// members take a lock_guard parameter as a witness that the heap lock is held.
class SmallPage : public ListNode<SmallPage> {
public:
    SmallPage()
        : m_hasFreeLines(true)
    {
    }

    void ref(std::lock_guard<StaticMutex>&);
    bool deref(std::lock_guard<StaticMutex>&);
    unsigned refCount(std::lock_guard<StaticMutex>&) { return m_refCount; }

    size_t sizeClass() { return m_sizeClass; }
    void setSizeClass(size_t sizeClass) { m_sizeClass = sizeClass; }

    bool hasFreeLines(std::lock_guard<StaticMutex>&) const { return m_hasFreeLines; }
    void setHasFreeLines(std::lock_guard<StaticMutex>&, bool hasFreeLines) { m_hasFreeLines = hasFreeLines; }

    // First SmallLine of this page; defined elsewhere (derived from this
    // object's position in Chunk metadata).
    SmallLine* begin();

    // For multi-smallPageSize pages: this metadata object's offset (in
    // smallPageSize units) from the first SmallPage of the large page, so
    // interior metadata can "slide" back to the head. 0 means head.
    unsigned char slide() const { return m_slide; }
    void setSlide(unsigned char slide) { m_slide = slide; }

private:
    unsigned char m_hasFreeLines: 1; // True if any line in the page is free.
    unsigned char m_refCount: 7; // Count of in-use lines in this page.
    unsigned char m_sizeClass;
    unsigned char m_slide;

static_assert(
    sizeClassCount <= std::numeric_limits<decltype(m_sizeClass)>::max(),
    "Largest size class must fit in SmallPage metadata");
};
+
// Adds one in-use line. Only valid on a head page (slide == 0); the second
// assert catches overflow of the 7-bit counter.
inline void SmallPage::ref(std::lock_guard<StaticMutex>&)
{
    BASSERT(!m_slide);
    ++m_refCount;
    BASSERT(m_refCount);
}
+
// Releases one in-use line; returns true when the page becomes completely
// free. Only valid on a head page (slide == 0).
inline bool SmallPage::deref(std::lock_guard<StaticMutex>&)
{
    BASSERT(!m_slide);
    BASSERT(m_refCount);
    --m_refCount;
    return !m_refCount;
}
+
+} // namespace bmalloc
+
+#endif // SmallPage_h
diff --git a/Source/bmalloc/bmalloc/StaticMutex.cpp b/Source/bmalloc/bmalloc/StaticMutex.cpp
new file mode 100644
index 000000000..ea25b1bdf
--- /dev/null
+++ b/Source/bmalloc/bmalloc/StaticMutex.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ScopeExit.h"
+#include "StaticMutex.h"
+#include <thread>
+
+namespace bmalloc {
+
// Called by lock() after a failed fast-path try_lock. Spins for a while
// before falling back to yielding to the scheduler.
void StaticMutex::lockSlowCase()
{
    // The longest critical section in bmalloc is much shorter than the
    // time it takes to make a system call to yield to the OS scheduler.
    // So, we try again a lot before we yield.
    static const size_t aLot = 256;

    // m_isSpinning acts as a spin token: only one waiter spins at a time,
    // so a convoy of waiters doesn't burn CPU pathologically. The scope
    // guard releases the token on every exit path.
    if (!m_isSpinning.test_and_set()) {
        auto clear = makeScopeExit([&] { m_isSpinning.clear(); });

        for (size_t i = 0; i < aLot; ++i) {
            if (try_lock())
                return;
        }
    }

    // Avoid spinning pathologically.
    while (!try_lock())
        std::this_thread::yield();
}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/StaticMutex.h b/Source/bmalloc/bmalloc/StaticMutex.h
new file mode 100644
index 000000000..9e1361749
--- /dev/null
+++ b/Source/bmalloc/bmalloc/StaticMutex.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef StaticMutex_h
+#define StaticMutex_h
+
+#include "BAssert.h"
+#include <atomic>
+#include <mutex>
+#include <thread>
+
+// A fast replacement for std::mutex, for use in static storage.
+
+// Use StaticMutex in static storage, where global constructors and exit-time
+// destructors are prohibited, but all memory is zero-initialized automatically.
+
+namespace bmalloc {
+
// A fast spinlock replacement for std::mutex, safe in static storage:
// it has no constructor, relying on zero-initialization of statics
// (a cleared std::atomic_flag is the unlocked state).
class StaticMutex {
protected:
    // Subclasses that support non-static storage must use explicit initialization.
    void init();

public:
    // std::mutex-compatible naming so std::lock_guard / std::unique_lock work.
    void lock();
    bool try_lock();
    void unlock();

private:
    void lockSlowCase();

    std::atomic_flag m_flag; // Set while the lock is held.
    std::atomic_flag m_isSpinning; // Spin token: limits spinning to one waiter.
};
+
// Sleeps for the given duration with the lock released, then reacquires it.
// A zero duration is a no-op (the lock is not dropped).
static inline void sleep(
    std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;

    lock.unlock();
    std::this_thread::sleep_for(duration);
    lock.lock();
}
+
// Polls until `condition` stays false across a sleep interval. Each pass
// resets the flag and sleeps (dropping the lock); another thread setting the
// flag back to true keeps the wait going. Used to wait for quiescence.
static inline void waitUntilFalse(
    std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration,
    bool& condition)
{
    while (condition) {
        condition = false;
        sleep(lock, sleepDuration);
    }
}
+
// Explicit initialization for non-static storage; mirrors the
// zero-initialized (unlocked, not-spinning) state statics get for free.
inline void StaticMutex::init()
{
    m_flag.clear();
    m_isSpinning.clear();
}
+
// Acquire semantics pair with the release in unlock() to order the
// critical section.
inline bool StaticMutex::try_lock()
{
    return !m_flag.test_and_set(std::memory_order_acquire);
}
+
// Fast path is a single uncontended test_and_set; contention is handled
// out of line in lockSlowCase().
inline void StaticMutex::lock()
{
    if (!try_lock())
        lockSlowCase();
}
+
// Release semantics publish the critical section's writes to the next
// acquirer.
inline void StaticMutex::unlock()
{
    m_flag.clear(std::memory_order_release);
}
+
+} // namespace bmalloc
+
+#endif // StaticMutex_h
diff --git a/Source/bmalloc/bmalloc/Syscall.h b/Source/bmalloc/bmalloc/Syscall.h
new file mode 100644
index 000000000..05748dbc2
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Syscall.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Syscall_h
+#define Syscall_h
+
+#include <errno.h>
+
// Retries a syscall expression while it reports transient failure (EAGAIN).
// The do { ... } while (0) wrapper — deliberately with NO trailing semicolon —
// makes the macro expand to a single statement, so the caller supplies the
// semicolon and `if (cond) SYSCALL(x); else ...;` parses correctly. (The
// previous version's trailing semicolon produced an extra empty statement,
// breaking if/else composition.)
#define SYSCALL(x) do { \
    while ((x) == -1 && errno == EAGAIN) { } \
} while (0)
+
+#endif // Syscall_h
diff --git a/Source/bmalloc/bmalloc/VMAllocate.h b/Source/bmalloc/bmalloc/VMAllocate.h
new file mode 100644
index 000000000..1b79f3b3d
--- /dev/null
+++ b/Source/bmalloc/bmalloc/VMAllocate.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VMAllocate_h
+#define VMAllocate_h
+
+#include "BAssert.h"
+#include "Logging.h"
+#include "Range.h"
+#include "Sizes.h"
+#include "Syscall.h"
+#include <algorithm>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if BOS(DARWIN)
+#include <mach/vm_page_size.h>
+#include <mach/vm_statistics.h>
+#endif
+
+namespace bmalloc {
+
+#if BOS(DARWIN)
+#define BMALLOC_VM_TAG VM_MAKE_TAG(VM_MEMORY_TCMALLOC)
+#else
+#define BMALLOC_VM_TAG -1
+#endif
+
// Returns the OS virtual memory page size, caching the sysconf result after
// the first call. (Zero means "not queried yet"; the benign race on first
// use just queries sysconf more than once with the same result.)
inline size_t vmPageSize()
{
    static size_t cachedPageSize;
    if (!cachedPageSize)
        cachedPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    return cachedPageSize;
}
+
// log2 of the VM page size (page sizes are powers of two), cached.
inline size_t vmPageShift()
{
    static size_t cached;
    if (!cached)
        cached = log2(vmPageSize());
    return cached;
}
+
// Rounds a byte count up to a whole number of VM pages.
inline size_t vmSize(size_t size)
{
    return roundUpToMultipleOf(vmPageSize(), size);
}
+
// Debug check: size must be non-zero and page-aligned. UNUSED silences
// unused-parameter warnings in release builds where BASSERT compiles away.
inline void vmValidate(size_t vmSize)
{
    UNUSED(vmSize);
    BASSERT(vmSize);
    BASSERT(vmSize == roundUpToMultipleOf(vmPageSize(), vmSize));
}
+
// Debug check: pointer must be non-null and page-aligned, size page-sized.
inline void vmValidate(void* p, size_t vmSize)
{
    vmValidate(vmSize);

    UNUSED(p);
    BASSERT(p);
    BASSERT(p == mask(p, ~(vmPageSize() - 1)));
}
+
// Physical page granularity for madvise-style operations. On recent iOS the
// kernel page size can differ from the VM page size exposed to user space;
// elsewhere it matches sysconf(_SC_PAGESIZE).
inline size_t vmPageSizePhysical()
{
#if (BPLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
    return vm_kernel_page_size;
#else
    static size_t cached;
    if (!cached)
        cached = sysconf(_SC_PAGESIZE);
    return cached;
#endif
}
+
// Debug check: size must be non-zero and physical-page-aligned.
inline void vmValidatePhysical(size_t vmSize)
{
    UNUSED(vmSize);
    BASSERT(vmSize);
    BASSERT(vmSize == roundUpToMultipleOf(vmPageSizePhysical(), vmSize));
}
+
// Debug check: pointer must be non-null and physical-page-aligned.
inline void vmValidatePhysical(void* p, size_t vmSize)
{
    vmValidatePhysical(vmSize);

    UNUSED(p);
    BASSERT(p);
    BASSERT(p == mask(p, ~(vmPageSizePhysical() - 1)));
}
+
// Reserves vmSize bytes of page-aligned, read/write anonymous VM.
// Returns nullptr on failure (after logging), never aborts.
inline void* tryVMAllocate(size_t vmSize)
{
    vmValidate(vmSize);
    // BMALLOC_VM_TAG tags the mapping for memory-analysis tools on Darwin;
    // it is -1 (no fd) elsewhere.
    void* result = mmap(0, vmSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, BMALLOC_VM_TAG, 0);
    if (result == MAP_FAILED) {
        logVMFailure();
        return nullptr;
    }
    return result;
}
+
// Like tryVMAllocate, but allocation failure is fatal (release assert).
inline void* vmAllocate(size_t vmSize)
{
    void* result = tryVMAllocate(vmSize);
    RELEASE_BASSERT(result);
    return result;
}
+
// Returns a page-aligned mapping to the OS. The munmap result is
// intentionally ignored; arguments are validated in debug builds instead.
inline void vmDeallocate(void* p, size_t vmSize)
{
    vmValidate(p, vmSize);
    munmap(p, vmSize);
}
+
// Makes a page range inaccessible (used to establish guard pages).
inline void vmRevokePermissions(void* p, size_t vmSize)
{
    vmValidate(p, vmSize);
    mprotect(p, vmSize, PROT_NONE);
}
+
+// Allocates vmSize bytes at a specified power-of-two alignment.
+// Use this function to create maskable memory regions.
+
inline void* tryVMAllocate(size_t vmAlignment, size_t vmSize)
{
    vmValidate(vmSize);
    vmValidate(vmAlignment);

    // Over-allocate by the alignment so an aligned sub-range must exist,
    // then trim the unaligned slop off both ends.
    size_t mappedSize = vmAlignment + vmSize;
    if (mappedSize < vmAlignment || mappedSize < vmSize) // Check for overflow
        return nullptr;

    char* mapped = static_cast<char*>(tryVMAllocate(mappedSize));
    if (!mapped)
        return nullptr;
    char* mappedEnd = mapped + mappedSize;

    char* aligned = roundUpToMultipleOf(vmAlignment, mapped);
    char* alignedEnd = aligned + vmSize;

    RELEASE_BASSERT(alignedEnd <= mappedEnd);

    // Unmap the leading slop (before the aligned start) ...
    if (size_t leftExtra = aligned - mapped)
        vmDeallocate(mapped, leftExtra);

    // ... and the trailing slop (after the aligned range).
    if (size_t rightExtra = mappedEnd - alignedEnd)
        vmDeallocate(alignedEnd, rightExtra);

    return aligned;
}
+
// Aligned allocation where failure is fatal (release assert).
inline void* vmAllocate(size_t vmAlignment, size_t vmSize)
{
    void* result = tryVMAllocate(vmAlignment, vmSize);
    RELEASE_BASSERT(result);
    return result;
}
+
// Releases the physical pages backing a range while keeping the virtual
// mapping reserved. Darwin's MADV_FREE_REUSABLE also updates memory
// accounting; MADV_DONTNEED is the portable equivalent.
inline void vmDeallocatePhysicalPages(void* p, size_t vmSize)
{
    vmValidatePhysical(p, vmSize);
#if BOS(DARWIN)
    SYSCALL(madvise(p, vmSize, MADV_FREE_REUSABLE));
#else
    SYSCALL(madvise(p, vmSize, MADV_DONTNEED));
#endif
}
+
// Declares that a previously-released range will be used again, undoing
// vmDeallocatePhysicalPages (MADV_FREE_REUSE pairs with MADV_FREE_REUSABLE
// on Darwin; MADV_NORMAL resets advice elsewhere).
inline void vmAllocatePhysicalPages(void* p, size_t vmSize)
{
    vmValidatePhysical(p, vmSize);
#if BOS(DARWIN)
    SYSCALL(madvise(p, vmSize, MADV_FREE_REUSE));
#else
    SYSCALL(madvise(p, vmSize, MADV_NORMAL));
#endif
}
+
// Trims requests that are un-page-aligned.
// Shrinks the range inward to page boundaries, so partially-covered pages
// (which may contain live data) are left alone; a sub-page range is a no-op.
inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size)
{
    char* begin = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
    char* end = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);

    if (begin >= end)
        return;

    vmDeallocatePhysicalPages(begin, end - begin);
}
+
// Expands requests that are un-page-aligned.
// Grows the range outward to page boundaries, so every byte of the request
// is guaranteed to be backed by committed physical pages.
inline void vmAllocatePhysicalPagesSloppy(void* p, size_t size)
{
    char* begin = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
    char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);

    if (begin >= end)
        return;

    vmAllocatePhysicalPages(begin, end - begin);
}
+
+} // namespace bmalloc
+
+#endif // VMAllocate_h
diff --git a/Source/bmalloc/bmalloc/VMHeap.cpp b/Source/bmalloc/bmalloc/VMHeap.cpp
new file mode 100644
index 000000000..74d8792bd
--- /dev/null
+++ b/Source/bmalloc/bmalloc/VMHeap.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "PerProcess.h"
+#include "VMHeap.h"
+#include <thread>
+
+namespace bmalloc {
+
// Maps fresh VM for a large allocation. Rounds alignment and size up to
// chunk multiples; returns an empty LargeRange on arithmetic overflow or
// mmap failure. Caller holds the heap lock (lock_guard witness).
LargeRange VMHeap::tryAllocateLargeChunk(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    // We allocate VM in aligned multiples to increase the chances that
    // the OS will provide contiguous ranges that we can merge.
    size_t roundedAlignment = roundUpToMultipleOf<chunkSize>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return LargeRange();
    alignment = roundedAlignment;

    size_t roundedSize = roundUpToMultipleOf<chunkSize>(size);
    if (roundedSize < size) // Check for overflow
        return LargeRange();
    size = roundedSize;

    void* memory = tryVMAllocate(alignment, size);
    if (!memory)
        return LargeRange();

    Chunk* chunk = static_cast<Chunk*>(memory);

#if BOS(DARWIN)
    // Register the range with the malloc zone so Darwin memory tools see it.
    m_zone.addRange(Range(chunk->bytes(), size));
#endif

    // Physical footprint 0: the pages are freshly mapped, not yet committed.
    return LargeRange(chunk->bytes(), size, 0);
}
+
// Maps a new chunk-aligned Chunk and carves it into pages of the given
// class, pushing each page's head metadata onto m_smallPages[pageClass].
void VMHeap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock, size_t pageClass)
{
    size_t pageSize = bmalloc::pageSize(pageClass);
    size_t smallPageCount = pageSize / smallPageSize;

    void* memory = vmAllocate(chunkSize, chunkSize);
    Chunk* chunk = static_cast<Chunk*>(memory);

    // We align to our page size in order to honor OS APIs and in order to
    // guarantee that we can service aligned allocation requests at equal
    // and smaller powers of two.
    size_t vmPageSize = roundUpToMultipleOf(bmalloc::vmPageSize(), pageSize);
    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(vmPageSize, sizeof(Chunk));

    Object begin(chunk, metadataSize);
    Object end(chunk, chunkSize);

    // Establish guard pages before writing to Chunk memory to work around
    // an edge case in the Darwin VM system (<rdar://problem/25910098>).
    vmRevokePermissions(begin.address(), vmPageSize);
    vmRevokePermissions(end.address() - vmPageSize, vmPageSize);

    // Shrink the usable range inside the guard pages.
    begin = begin + vmPageSize;
    end = end - vmPageSize;
    BASSERT(begin <= end && end.offset() - begin.offset() >= pageSize);

    // Construct the Chunk metadata in place (the mapping starts as raw VM).
    new (chunk) Chunk(lock);

#if BOS(DARWIN)
    // Register the usable range with the malloc zone for Darwin memory tools.
    m_zone.addRange(Range(begin.address(), end.address() - begin.address()));
#endif

    for (Object it = begin; it + pageSize <= end; it = it + pageSize) {
        SmallPage* page = it.page();

        // Record each constituent small page's offset from the head page so
        // interior metadata can slide back to the head (see SmallPage::slide).
        for (size_t i = 0; i < smallPageCount; ++i)
            page[i].setSlide(i);

        m_smallPages[pageClass].push(page);
    }
}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/VMHeap.h b/Source/bmalloc/bmalloc/VMHeap.h
new file mode 100644
index 000000000..c54bbf5fc
--- /dev/null
+++ b/Source/bmalloc/bmalloc/VMHeap.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VMHeap_h
+#define VMHeap_h
+
+#include "Chunk.h"
+#include "FixedVector.h"
+#include "LargeRange.h"
+#include "Map.h"
+#include "Vector.h"
+#if BOS(DARWIN)
+#include "Zone.h"
+#endif
+
+namespace bmalloc {
+
+class BeginTag;
+class EndTag;
+class Heap;
+
+typedef enum { Sync, Async } ScavengeMode;
+
// The layer between Heap and the OS: hands out small pages (mapping new
// chunks on demand) and raw VM ranges for large allocations. All entry
// points require the heap lock, witnessed by the lock parameters.
class VMHeap {
public:
    SmallPage* allocateSmallPage(std::lock_guard<StaticMutex>&, size_t);
    void deallocateSmallPage(std::unique_lock<StaticMutex>&, size_t, SmallPage*, ScavengeMode);

    LargeRange tryAllocateLargeChunk(std::lock_guard<StaticMutex>&, size_t alignment, size_t);

private:
    void allocateSmallChunk(std::lock_guard<StaticMutex>&, size_t);

    // Free-page lists, one per page class.
    std::array<List<SmallPage>, pageClassCount> m_smallPages;

#if BOS(DARWIN)
    // Malloc zone registration so Darwin memory tools can see our ranges.
    Zone m_zone;
#endif
};
+
// Pops a free page of the given class, mapping a fresh chunk first if the
// free list is empty, and recommits its physical memory before returning it.
inline SmallPage* VMHeap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t pageClass)
{
    if (m_smallPages[pageClass].isEmpty())
        allocateSmallChunk(lock, pageClass);

    SmallPage* page = m_smallPages[pageClass].pop();
    vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
    return page;
}
+
// Returns a page's physical memory to the OS and pushes it on the free list.
// In Async (scavenger) mode the heap lock is dropped around the madvise
// syscall so allocation isn't stalled behind it; the page is not on any
// list during that window, so no one else can touch it.
inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, size_t pageClass, SmallPage* page, ScavengeMode scavengeMode)
{
    if (scavengeMode == Async)
        lock.unlock();
    vmDeallocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
    if (scavengeMode == Async)
        lock.lock();

    m_smallPages[pageClass].push(page);
}
+
+} // namespace bmalloc
+
+#endif // VMHeap_h
diff --git a/Source/bmalloc/bmalloc/Vector.h b/Source/bmalloc/bmalloc/Vector.h
new file mode 100644
index 000000000..28bbe1332
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Vector.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Vector_h
+#define Vector_h
+
+#include "Inline.h"
+#include "VMAllocate.h"
+#include <cstddef>
+#include <cstring>
+
+namespace bmalloc {
+
+// A replacement for std::vector that allocates using vmAllocate instead of
+// malloc, shrinks automatically, and supports "popping" from the middle.
+
+// Trivially-destructible element type required because shrink()/pop()
+// simply abandon elements without running destructors.
+template<typename T>
+class Vector {
+ static_assert(std::is_trivially_destructible<T>::value, "Vector must have a trivial destructor.");
+public:
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ // NOTE(review): move-constructible but not move-assignable, and copying
+ // is implicitly deleted by the user-declared move constructor — confirm
+ // callers never need assignment before relying on it.
+ Vector();
+ Vector(Vector&&);
+ ~Vector();
+
+ iterator begin() { return m_buffer; }
+ iterator end() { return m_buffer + m_size; }
+
+ size_t size() { return m_size; }
+ size_t capacity() { return m_capacity; }
+
+ T& operator[](size_t);
+ T& last() { return m_buffer[m_size - 1]; }
+
+ void push(const T&);
+
+ // pop(i)/pop(it) "pop from the middle": swap with last, then pop —
+ // O(1) but does not preserve element order.
+ T pop();
+ T pop(size_t);
+ T pop(const_iterator it) { return pop(it - begin()); }
+
+ void insert(iterator, const T&);
+ T remove(iterator);
+
+ void grow(size_t);
+ void shrink(size_t);
+ void resize(size_t);
+
+ void shrinkToFit();
+
+private:
+ // Grow by 2x; shrink capacity once size drops below capacity/4.
+ static const size_t growFactor = 2;
+ static const size_t shrinkFactor = 4;
+ // First allocation is exactly one VM page worth of elements.
+ static size_t initialCapacity() { return vmPageSize() / sizeof(T); }
+
+ void growCapacity();
+ void shrinkCapacity();
+ void reallocateBuffer(size_t);
+
+ T* m_buffer;
+ size_t m_size;
+ size_t m_capacity;
+};
+
+// Default-constructs an empty vector; no allocation until first push().
+template<typename T>
+inline Vector<T>::Vector()
+ : m_buffer(nullptr)
+ , m_size(0)
+ , m_capacity(0)
+{
+}
+
+// Move constructor: steals the buffer and leaves the source empty so the
+// source's destructor is a no-op.
+template<typename T>
+inline Vector<T>::Vector(Vector&& other)
+ : m_buffer(other.m_buffer)
+ , m_size(other.m_size)
+ , m_capacity(other.m_capacity)
+{
+ other.m_buffer = nullptr;
+ other.m_size = 0;
+ other.m_capacity = 0;
+}
+
+// Releases the backing VM allocation; elements need no destruction
+// (enforced by the class's static_assert).
+template<typename T>
+Vector<T>::~Vector()
+{
+ if (m_buffer)
+ vmDeallocate(m_buffer, vmSize(m_capacity * sizeof(T)));
+}
+
+// Bounds-checked (debug-only BASSERT) element access.
+template<typename T>
+inline T& Vector<T>::operator[](size_t i)
+{
+ BASSERT(i < m_size);
+ return m_buffer[i];
+}
+
+// Appends a copy of value, growing the buffer if full. INLINE fast path;
+// the cold growCapacity() path is NO_INLINE.
+template<typename T>
+INLINE void Vector<T>::push(const T& value)
+{
+ if (m_size == m_capacity)
+ growCapacity();
+ m_buffer[m_size++] = value;
+}
+
+// Removes and returns the last element. May shrink capacity (via shrink()).
+template<typename T>
+inline T Vector<T>::pop()
+{
+ BASSERT(m_size);
+ T value = m_buffer[m_size - 1];
+ shrink(m_size - 1);
+ return value;
+}
+
+// Removes and returns element i in O(1) by swapping it with the last
+// element first. Does not preserve element order.
+template<typename T>
+inline T Vector<T>::pop(size_t i)
+{
+ BASSERT(i < m_size);
+ std::swap(m_buffer[i], last());
+ return pop();
+}
+
+// Inserts value before 'it', shifting later elements up by one.
+// Index and move count are captured before grow() because growing may
+// reallocate and invalidate 'it'.
+template<typename T>
+void Vector<T>::insert(iterator it, const T& value)
+{
+ size_t index = it - begin();
+ size_t moveCount = end() - it;
+
+ grow(m_size + 1);
+ // memmove is safe for overlapping ranges; T is trivially copyable
+ // enough for this container (trivially destructible is asserted).
+ std::memmove(&m_buffer[index + 1], &m_buffer[index], moveCount * sizeof(T));
+
+ m_buffer[index] = value;
+}
+
+// Order-preserving removal: returns *it and shifts later elements down.
+template<typename T>
+T Vector<T>::remove(iterator it)
+{
+ size_t index = it - begin();
+ size_t moveCount = end() - it - 1;
+
+ T result = *it;
+
+ std::memmove(&m_buffer[index], &m_buffer[index + 1], moveCount * sizeof(T));
+ shrink(m_size - 1);
+
+ return result;
+}
+
+// Grows to 'size' elements, value-initializing (T()) each new slot.
+template<typename T>
+inline void Vector<T>::grow(size_t size)
+{
+ BASSERT(size >= m_size);
+ while (m_size < size)
+ push(T());
+}
+
+// Shrinks logical size; automatically releases capacity once occupancy
+// falls below capacity/shrinkFactor (never below the initial capacity).
+template<typename T>
+inline void Vector<T>::shrink(size_t size)
+{
+ BASSERT(size <= m_size);
+ m_size = size;
+ if (m_size < m_capacity / shrinkFactor && m_capacity > initialCapacity())
+ shrinkCapacity();
+}
+
+// Sets size exactly, growing or shrinking as needed.
+template<typename T>
+inline void Vector<T>::resize(size_t size)
+{
+ if (size <= m_size)
+ shrink(size);
+ else
+ grow(size);
+}
+
+// Replaces the backing store with a fresh vmAllocate'd buffer of at least
+// newCapacity elements and copies the live elements across.
+template<typename T>
+void Vector<T>::reallocateBuffer(size_t newCapacity)
+{
+ // Guard the byte-size multiplication against size_t overflow.
+ RELEASE_BASSERT(newCapacity < std::numeric_limits<size_t>::max() / sizeof(T));
+
+ // vmSize rounds up to VM page granularity; a zero request yields a
+ // null buffer (memcpy of 0 bytes below is then harmless).
+ size_t vmSize = bmalloc::vmSize(newCapacity * sizeof(T));
+ T* newBuffer = vmSize ? static_cast<T*>(vmAllocate(vmSize)) : nullptr;
+ if (m_buffer) {
+ std::memcpy(newBuffer, m_buffer, m_size * sizeof(T));
+ vmDeallocate(m_buffer, bmalloc::vmSize(m_capacity * sizeof(T)));
+ }
+
+ m_buffer = newBuffer;
+ // Capacity reflects the page-rounded allocation, so it can exceed
+ // the requested newCapacity.
+ m_capacity = vmSize / sizeof(T);
+}
+
+// Cold path: drop capacity by shrinkFactor, clamped to initialCapacity().
+template<typename T>
+NO_INLINE void Vector<T>::shrinkCapacity()
+{
+ size_t newCapacity = max(initialCapacity(), m_capacity / shrinkFactor);
+ reallocateBuffer(newCapacity);
+}
+
+// Cold path: grow capacity geometrically (growFactor x current size),
+// starting at initialCapacity() for the first allocation.
+template<typename T>
+NO_INLINE void Vector<T>::growCapacity()
+{
+ size_t newCapacity = max(initialCapacity(), m_size * growFactor);
+ reallocateBuffer(newCapacity);
+}
+
+// Releases all spare capacity; with m_size == 0 this frees the buffer
+// entirely (reallocateBuffer(0) yields a null buffer).
+template<typename T>
+void Vector<T>::shrinkToFit()
+{
+ if (m_size < m_capacity)
+ reallocateBuffer(m_size);
+}
+
+} // namespace bmalloc
+
+#endif // Vector_h
diff --git a/Source/bmalloc/bmalloc/Zone.cpp b/Source/bmalloc/bmalloc/Zone.cpp
new file mode 100644
index 000000000..a18e7cefa
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Zone.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Sizes.h"
+#include "Zone.h"
+
+namespace bmalloc {
+
+// Copies a T out of another process's address space using the
+// tool-supplied memory_reader_t. On failure, 'result' is left untouched
+// (so callers see whatever they initialized it to).
+template<typename T> static void remoteRead(task_t task, memory_reader_t reader, vm_address_t remotePointer, T& result)
+{
+ void* tmp = nullptr;
+ kern_return_t error = reader(task, remotePointer, sizeof(T), &tmp);
+
+ // This read sometimes fails for unknown reasons (<rdar://problem/14093757>).
+ // Avoid a crash by skipping the memcpy when this happens.
+ if (error || !tmp) {
+ fprintf(stderr, "bmalloc: error reading remote process: 0x%x\n", error);
+ return;
+ }
+
+ memcpy(&result, tmp, sizeof(T));
+}
+
+// These function pointers are invoked unconditionally on all zones by various
+// system tools. We don't support any of these features, but we provide
+// just enough functionality not to crash.
+
+// good_size: we don't round allocation sizes for the zone API; echo back.
+static size_t good_size(malloc_zone_t*, size_t size)
+{
+ return size;
+}
+
+// check: always report the zone as consistent.
+static boolean_t check(malloc_zone_t*)
+{
+ return true;
+}
+
+// print/log/force_lock/force_unlock: intentionally empty stubs.
+static void print(malloc_zone_t*, boolean_t)
+{
+}
+
+static void log(malloc_zone_t*, void*)
+{
+}
+
+static void force_lock(malloc_zone_t*)
+{
+}
+
+static void force_unlock(malloc_zone_t*)
+{
+}
+
+// statistics: report all-zero stats rather than real heap numbers.
+static void statistics(malloc_zone_t*, malloc_statistics_t* statistics)
+{
+ memset(statistics, 0, sizeof(malloc_statistics_t));
+}
+
+// size(): returning 0 tells the malloc machinery no pointer is ours, so
+// malloc_size()/free() on system zones never route into bmalloc.
+static size_t zoneSize(malloc_zone_t*, const void*)
+{
+ // Our zone is not public API, so no pointer can belong to us.
+ return 0;
+}
+
+// This function runs inside the leaks process.
+// Reconstructs the target process's Zone via the remote reader, then
+// reports each tracked Range as both a region and an in-use range so
+// the tool scans all bmalloc memory conservatively.
+static kern_return_t enumerator(task_t task, void* context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder)
+{
+ Zone remoteZone(task, reader, zone_address);
+ for (auto& range : remoteZone.ranges()) {
+ vm_range_t vmRange = { reinterpret_cast<vm_address_t>(range.begin()), range.size() };
+
+ if ((type_mask & MALLOC_PTR_REGION_RANGE_TYPE))
+ (*recorder)(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &vmRange, 1);
+
+ if ((type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE))
+ (*recorder)(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, &vmRange, 1);
+ }
+
+ // 0 == KERN_SUCCESS.
+ return 0;
+}
+
+// The memory analysis API requires the contents of this struct to be a static
+// constant in the program binary. The leaks process will load this struct
+// out of the program binary (and not out of the running process).
+// Introspection vtable handed to the malloc-zone machinery. Uses C99-style
+// designated initializers (a compiler extension in this C++ dialect); any
+// field not listed (e.g. enumerate_discharged_pointers) stays null.
+static const malloc_introspection_t zoneIntrospect = {
+ .enumerator = bmalloc::enumerator,
+ .good_size = bmalloc::good_size,
+ .check = bmalloc::check,
+ .print = bmalloc::print,
+ .log = bmalloc::log,
+ .force_lock = bmalloc::force_lock,
+ .force_unlock = bmalloc::force_unlock,
+ .statistics = bmalloc::statistics
+};
+
+// In-process constructor: fills in the malloc_zone_t base fields and
+// registers the zone so system tools will enumerate it.
+Zone::Zone()
+{
+ malloc_zone_t::size = &bmalloc::zoneSize;
+ malloc_zone_t::zone_name = "WebKit Malloc";
+ malloc_zone_t::introspect = const_cast<malloc_introspection_t*>(&bmalloc::zoneIntrospect);
+ // Version 4 of the malloc zone interface.
+ malloc_zone_t::version = 4;
+ malloc_zone_register(this);
+}
+
+// Out-of-process constructor (runs inside the inspecting tool): bulk-copies
+// the remote Zone into *this. NOTE(review): the copied function pointers
+// are remote addresses — presumably only m_ranges is consulted on this
+// side (see enumerator); confirm nothing calls through them locally.
+Zone::Zone(task_t task, memory_reader_t reader, vm_address_t remotePointer)
+{
+ remoteRead(task, reader, remotePointer, *this);
+}
+
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/Zone.h b/Source/bmalloc/bmalloc/Zone.h
new file mode 100644
index 000000000..6253418ed
--- /dev/null
+++ b/Source/bmalloc/bmalloc/Zone.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Zone_h
+#define Zone_h
+
+#include "FixedVector.h"
+#include "Range.h"
+#include <malloc/malloc.h>
+
+namespace bmalloc {
+
+class Chunk;
+
+// Darwin malloc zone wrapper. Derives from malloc_zone_t so 'this' can be
+// passed directly to malloc_zone_register().
+class Zone : public malloc_zone_t {
+public:
+ // Enough capacity to track a 64GB heap, so probably enough for anything.
+ static const size_t capacity = 2048;
+
+ // In-process constructor (registers the zone) and remote-process
+ // constructor (reconstructs a zone via a memory_reader_t) — see Zone.cpp.
+ Zone();
+ Zone(task_t, memory_reader_t, vm_address_t);
+
+ void addRange(Range);
+ FixedVector<Range, capacity>& ranges() { return m_ranges; }
+
+private:
+ // This vector has two purposes:
+ // (1) It stores the list of Chunks so that we can enumerate
+ // each Chunk and request that it be scanned if reachable.
+ // (2) It roots a pointer to each Chunk in a global non-malloc
+ // VM region, making each Chunk appear reachable, and therefore
+ // ensuring that the leaks tool will scan it. (The leaks tool
+ // conservatively scans all writeable VM regions that are not malloc
+ // regions, and then scans malloc regions using the introspection API.)
+ // This prevents the leaks tool from reporting false positive leaks for
+ // objects pointed to from bmalloc memory -- though it also prevents the
+ // leaks tool from finding any leaks in bmalloc memory.
+ FixedVector<Range, capacity> m_ranges;
+};
+
+// Records a range for tool enumeration. Silently drops the range when the
+// fixed-capacity vector is full — tracking is best-effort, not required
+// for allocator correctness.
+inline void Zone::addRange(Range range)
+{
+ if (m_ranges.size() == m_ranges.capacity())
+ return;
+
+ m_ranges.push(range);
+}
+
+} // namespace bmalloc
+
+#endif // Zone_h
diff --git a/Source/bmalloc/bmalloc/bmalloc.h b/Source/bmalloc/bmalloc/bmalloc.h
new file mode 100644
index 000000000..6d1f1e169
--- /dev/null
+++ b/Source/bmalloc/bmalloc/bmalloc.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "Cache.h"
+#include "Heap.h"
+#include "PerProcess.h"
+#include "StaticMutex.h"
+
+namespace bmalloc {
+namespace api {
+
+// Public allocation entry points; thin wrappers over the per-thread Cache.
+
+// Returns null on failure.
+inline void* tryMalloc(size_t size)
+{
+ return Cache::tryAllocate(size);
+}
+
+// Crashes on failure.
+inline void* malloc(size_t size)
+{
+ return Cache::allocate(size);
+}
+
+// Returns null on failure.
+inline void* tryMemalign(size_t alignment, size_t size)
+{
+ return Cache::tryAllocate(alignment, size);
+}
+
+// Crashes on failure.
+inline void* memalign(size_t alignment, size_t size)
+{
+ return Cache::allocate(alignment, size);
+}
+
+// Crashes on failure.
+inline void* realloc(void* object, size_t newSize)
+{
+ return Cache::reallocate(object, newSize);
+}
+
+inline void free(void* object)
+{
+ Cache::deallocate(object);
+}
+
+// Flushes only the calling thread's cache back to the shared heap.
+inline void scavengeThisThread()
+{
+ Cache::scavenge();
+}
+
+// Flushes this thread's cache, then synchronously returns free heap
+// memory to the OS under the global heap lock.
+inline void scavenge()
+{
+ scavengeThisThread();
+
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+ PerProcess<Heap>::get()->scavenge(lock, Sync);
+}
+
+// True when bmalloc is active (i.e. not redirected to the system-malloc
+// DebugHeap). NOTE(review): uses getFastCase(), which presumes the Heap
+// singleton is already constructed — confirm callers run after first use.
+inline bool isEnabled()
+{
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+ return !PerProcess<Heap>::getFastCase()->debugHeap();
+}
+
+#if BOS(DARWIN)
+// Overrides the QoS class used by the background scavenger thread.
+inline void setScavengerThreadQOSClass(qos_class_t overrideClass)
+{
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+ PerProcess<Heap>::getFastCase()->setScavengerThreadQOSClass(overrideClass);
+}
+#endif
+
+} // namespace api
+} // namespace bmalloc
diff --git a/Source/bmalloc/bmalloc/darwin/BSoftLinking.h b/Source/bmalloc/bmalloc/darwin/BSoftLinking.h
new file mode 100644
index 000000000..36a9fcf12
--- /dev/null
+++ b/Source/bmalloc/bmalloc/darwin/BSoftLinking.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2007, 2008, 2011-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BSoftLinking_h
+#define BSoftLinking_h
+
+#include "BAssert.h"
+#include <dlfcn.h>
+#include <mutex>
+
+// Defines framework##Library(): dlopen()s the named private framework
+// exactly once (thread-safe via std::call_once) and caches the handle.
+// Crashes with the dlerror() message if the framework can't be loaded.
+#define BSOFT_LINK_PRIVATE_FRAMEWORK(framework) \
+ static void* framework##Library() \
+ { \
+ static void* frameworkLibrary; \
+ static std::once_flag once; \
+ std::call_once(once, [] { \
+ frameworkLibrary = dlopen("/System/Library/PrivateFrameworks/" #framework ".framework/" #framework, RTLD_NOW); \
+ RELEASE_BASSERT_WITH_MESSAGE(frameworkLibrary, "%s", dlerror()); \
+ }); \
+ return frameworkLibrary; \
+ }
+
+// Lazy soft-link: the function pointer initially points at init##functionName,
+// which resolves the real symbol with dlsym() on first call (once, thread-safe),
+// swaps the pointer, and forwards the call. Subsequent calls go straight
+// through the resolved pointer via the inline wrapper.
+#define BSOFT_LINK_FUNCTION(framework, functionName, resultType, parameterDeclarations, parameterNames) \
+ extern "C" { \
+ resultType functionName parameterDeclarations; \
+ } \
+ static resultType init##functionName parameterDeclarations; \
+ static resultType (*softLink##functionName) parameterDeclarations = init##functionName; \
+ \
+ static resultType init##functionName parameterDeclarations \
+ { \
+ static std::once_flag once; \
+ std::call_once(once, [] { \
+ softLink##functionName = (resultType (*) parameterDeclarations) dlsym(framework##Library(), #functionName); \
+ RELEASE_BASSERT_WITH_MESSAGE(softLink##functionName, "%s", dlerror()); \
+ }); \
+ return softLink##functionName parameterNames; \
+ } \
+ \
+ inline resultType functionName parameterDeclarations \
+ { \
+ return softLink##functionName parameterNames; \
+ }
+
+#endif // BSoftLinking_h
diff --git a/Source/bmalloc/bmalloc/mbmalloc.cpp b/Source/bmalloc/bmalloc/mbmalloc.cpp
new file mode 100644
index 000000000..f31a80d22
--- /dev/null
+++ b/Source/bmalloc/bmalloc/mbmalloc.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bmalloc.h"
+
+#define EXPORT __attribute__((visibility("default")))
+
+// C-linkage shims exposing bmalloc to benchmark/test harnesses (the
+// "mb" prefix distinguishes them from the real malloc symbols). The
+// extra size parameters on mbfree/mbrealloc exist for harness API
+// compatibility and are deliberately ignored.
+extern "C" {
+
+EXPORT void* mbmalloc(size_t);
+EXPORT void* mbmemalign(size_t, size_t);
+EXPORT void mbfree(void*, size_t);
+EXPORT void* mbrealloc(void*, size_t, size_t);
+EXPORT void mbscavenge();
+
+void* mbmalloc(size_t size)
+{
+ return bmalloc::api::malloc(size);
+}
+
+void* mbmemalign(size_t alignment, size_t size)
+{
+ return bmalloc::api::memalign(alignment, size);
+}
+
+void mbfree(void* p, size_t)
+{
+ bmalloc::api::free(p);
+}
+
+void* mbrealloc(void* p, size_t, size_t size)
+{
+ return bmalloc::api::realloc(p, size);
+}
+
+void mbscavenge()
+{
+ bmalloc::api::scavenge();
+}
+
+} // extern "C"