author    Simon Hausmann <simon.hausmann@digia.com>  2012-10-17 16:21:14 +0200
committer Simon Hausmann <simon.hausmann@digia.com>  2012-10-17 16:21:14 +0200
commit    8995b83bcbfbb68245f779b64e5517627c6cc6ea (patch)
tree      17985605dab9263cc2444bd4d45f189e142cca7c /Source/JavaScriptCore/heap
parent    b9c9652036d5e9f1e29c574f40bc73a35c81ace6 (diff)
download  qtwebkit-8995b83bcbfbb68245f779b64e5517627c6cc6ea.tar.gz
Imported WebKit commit cf4f8fc6f19b0629f51860cb2d4b25e139d07e00 (http://svn.webkit.org/repository/webkit/trunk@131592)
New snapshot that includes the build fixes for Mac OS X 10.6 and earlier as well as the previously cherry-picked changes
Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.cpp          |  73
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.h            | 280
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlock.h               | 101
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.cpp             |  72
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.h               |  12
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h  |  25
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitor.cpp             |  57
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitor.h               |  60
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h  | 121
-rw-r--r--  Source/JavaScriptCore/heap/GCThread.cpp                | 130
-rw-r--r--  Source/JavaScriptCore/heap/GCThread.h                  |  63
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.cpp      | 107
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.h        |  45
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp                    | 174
-rw-r--r--  Source/JavaScriptCore/heap/Heap.h                      |  54
-rw-r--r--  Source/JavaScriptCore/heap/HeapBlock.h                 |  22
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.cpp          | 258
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.h            |  61
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.cpp      |  25
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.h        |  19
-rw-r--r--  Source/JavaScriptCore/heap/JITStubRoutineSet.cpp       |   4
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.cpp         |  23
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.h           |  16
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.cpp             |  46
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.h               |  35
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.cpp             |  57
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.h               |  62
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.cpp             |  75
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.h               |  21
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h  |  41
-rw-r--r--  Source/JavaScriptCore/heap/Weak.h                      |   4
31 files changed, 1574 insertions, 569 deletions
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.cpp b/Source/JavaScriptCore/heap/BlockAllocator.cpp
index 690fd83c4..9a2e0bf60 100644
--- a/Source/JavaScriptCore/heap/BlockAllocator.cpp
+++ b/Source/JavaScriptCore/heap/BlockAllocator.cpp
@@ -26,51 +26,54 @@
#include "config.h"
#include "BlockAllocator.h"
+#include "CopiedBlock.h"
+#include "MarkedBlock.h"
#include <wtf/CurrentTime.h>
namespace JSC {
BlockAllocator::BlockAllocator()
- : m_numberOfFreeBlocks(0)
+ : m_copiedRegionSet(CopiedBlock::blockSize)
+ , m_markedRegionSet(MarkedBlock::blockSize)
+ , m_numberOfEmptyRegions(0)
, m_isCurrentlyAllocating(false)
, m_blockFreeingThreadShouldQuit(false)
, m_blockFreeingThread(createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree"))
{
ASSERT(m_blockFreeingThread);
- m_freeBlockLock.Init();
+ m_regionLock.Init();
}
BlockAllocator::~BlockAllocator()
{
- releaseFreeBlocks();
+ releaseFreeRegions();
{
- MutexLocker locker(m_freeBlockConditionLock);
-
+ MutexLocker locker(m_emptyRegionConditionLock);
m_blockFreeingThreadShouldQuit = true;
- m_freeBlockCondition.broadcast();
+ m_emptyRegionCondition.broadcast();
}
waitForThreadCompletion(m_blockFreeingThread);
}
-void BlockAllocator::releaseFreeBlocks()
+void BlockAllocator::releaseFreeRegions()
{
while (true) {
- DeadBlock* block;
+ Region* region;
{
- SpinLockHolder locker(&m_freeBlockLock);
- if (!m_numberOfFreeBlocks)
- block = 0;
+ SpinLockHolder locker(&m_regionLock);
+ if (!m_numberOfEmptyRegions)
+ region = 0;
else {
- block = m_freeBlocks.removeHead();
- ASSERT(block);
- m_numberOfFreeBlocks--;
+ region = m_emptyRegions.removeHead();
+ ASSERT(region);
+ m_numberOfEmptyRegions--;
}
}
- if (!block)
+ if (!region)
break;
- DeadBlock::destroy(block).deallocate();
+ delete region;
}
}
@@ -79,7 +82,7 @@ void BlockAllocator::waitForRelativeTimeWhileHoldingLock(double relative)
if (m_blockFreeingThreadShouldQuit)
return;
- m_freeBlockCondition.timedWait(m_freeBlockConditionLock, currentTime() + relative);
+ m_emptyRegionCondition.timedWait(m_emptyRegionConditionLock, currentTime() + relative);
}
void BlockAllocator::waitForRelativeTime(double relative)
@@ -88,7 +91,7 @@ void BlockAllocator::waitForRelativeTime(double relative)
// frequently. It would only be a bug if this function failed to return
// when it was asked to do so.
- MutexLocker locker(m_freeBlockConditionLock);
+ MutexLocker locker(m_emptyRegionConditionLock);
waitForRelativeTimeWhileHoldingLock(relative);
}
@@ -114,30 +117,40 @@ void BlockAllocator::blockFreeingThreadMain()
// Now process the list of free blocks. Keep freeing until half of the
// blocks that are currently on the list are gone. Assume that a size_t
// field can be accessed atomically.
- size_t currentNumberOfFreeBlocks = m_numberOfFreeBlocks;
- if (!currentNumberOfFreeBlocks)
+ size_t currentNumberOfEmptyRegions = m_numberOfEmptyRegions;
+ if (!currentNumberOfEmptyRegions)
continue;
- size_t desiredNumberOfFreeBlocks = currentNumberOfFreeBlocks / 2;
+ size_t desiredNumberOfEmptyRegions = currentNumberOfEmptyRegions / 2;
while (!m_blockFreeingThreadShouldQuit) {
- DeadBlock* block;
+ Region* region;
{
- SpinLockHolder locker(&m_freeBlockLock);
- if (m_numberOfFreeBlocks <= desiredNumberOfFreeBlocks)
- block = 0;
+ SpinLockHolder locker(&m_regionLock);
+ if (m_numberOfEmptyRegions <= desiredNumberOfEmptyRegions)
+ region = 0;
else {
- block = m_freeBlocks.removeHead();
- ASSERT(block);
- m_numberOfFreeBlocks--;
+ region = m_emptyRegions.removeHead();
+ ASSERT(region);
+ m_numberOfEmptyRegions--;
}
}
- if (!block)
+ if (!region)
break;
- DeadBlock::destroy(block).deallocate();
+ delete region;
+ }
+
+ // Sleep until there is actually work to do rather than waking up every second to check.
+ MutexLocker locker(m_emptyRegionConditionLock);
+ m_regionLock.Lock();
+ while (!m_numberOfEmptyRegions && !m_blockFreeingThreadShouldQuit) {
+ m_regionLock.Unlock();
+ m_emptyRegionCondition.wait(m_emptyRegionConditionLock);
+ m_regionLock.Lock();
}
+ m_regionLock.Unlock();
}
}
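
Note: the rewritten BlockAllocator above no longer caches individual PageAllocationAligned blocks; it caches 64 KB Regions, each carved into fixed-size blocks, and the background thread now trims the empty-region list and sleeps on a condition instead of waking every second. A minimal standalone sketch of that "free half the backlog, then sleep until more arrives" pattern, using std:: primitives rather than the WTF ones in the patch (all names here are illustrative):

    #include <condition_variable>
    #include <list>
    #include <mutex>

    struct Region { /* backing pages elided */ };

    class RegionTrimmer {
    public:
        // Mutator side: park a region that just became empty and wake
        // the trimming thread only if the list was previously empty.
        void addEmptyRegion(Region* region)
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            bool wasEmpty = m_empty.empty();
            m_empty.push_back(region);
            if (wasEmpty)
                m_condition.notify_one();
        }

        void requestQuit()
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_shouldQuit = true;
            m_condition.notify_one();
        }

        // Background thread: free half of the current backlog, then block
        // until a region shows up again -- no periodic wakeups.
        void trimmerMain()
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            while (!m_shouldQuit) {
                size_t target = m_empty.size() / 2;
                while (m_empty.size() > target) {
                    Region* region = m_empty.front();
                    m_empty.pop_front();
                    lock.unlock();      // never return pages while holding the lock
                    delete region;
                    lock.lock();
                }
                m_condition.wait(lock, [&] { return !m_empty.empty() || m_shouldQuit; });
            }
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_condition;
        std::list<Region*> m_empty;
        bool m_shouldQuit = false;
    };
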
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.h b/Source/JavaScriptCore/heap/BlockAllocator.h
index 042e65d92..a41df1aab 100644
--- a/Source/JavaScriptCore/heap/BlockAllocator.h
+++ b/Source/JavaScriptCore/heap/BlockAllocator.h
@@ -35,25 +35,116 @@
namespace JSC {
+class BlockAllocator;
+class CopiedBlock;
+class MarkedBlock;
+class Region;
+
// Simple allocator to reduce VM cost by holding onto blocks of memory for
// short periods of time and then freeing them on a secondary thread.
class DeadBlock : public HeapBlock<DeadBlock> {
public:
- static DeadBlock* create(const PageAllocationAligned&);
+ DeadBlock(Region*);
+};
+
+inline DeadBlock::DeadBlock(Region* region)
+ : HeapBlock<DeadBlock>(region)
+{
+}
+
+class Region : public DoublyLinkedListNode<Region> {
+ friend CLASS_IF_GCC DoublyLinkedListNode<Region>;
+ friend class BlockAllocator;
+public:
+ ~Region();
+ static Region* create(size_t blockSize);
+ static Region* createCustomSize(size_t blockSize, size_t blockAlignment);
+ Region* reset(size_t blockSize);
+
+ size_t blockSize() const { return m_blockSize; }
+ bool isFull() const { return m_blocksInUse == m_totalBlocks; }
+ bool isEmpty() const { return !m_blocksInUse; }
+
+ DeadBlock* allocate();
+ void deallocate(void*);
+
+ static const size_t s_regionSize = 64 * KB;
private:
- DeadBlock(const PageAllocationAligned&);
+ Region(PageAllocationAligned&, size_t blockSize, size_t totalBlocks);
+
+ PageAllocationAligned m_allocation;
+ size_t m_totalBlocks;
+ size_t m_blocksInUse;
+ size_t m_blockSize;
+ Region* m_prev;
+ Region* m_next;
+ DoublyLinkedList<DeadBlock> m_deadBlocks;
};
-inline DeadBlock::DeadBlock(const PageAllocationAligned& allocation)
- : HeapBlock<DeadBlock>(allocation)
+inline Region* Region::create(size_t blockSize)
+{
+ ASSERT(blockSize <= s_regionSize);
+ ASSERT(!(s_regionSize % blockSize));
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(s_regionSize, s_regionSize, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation))
+ CRASH();
+ return new Region(allocation, blockSize, s_regionSize / blockSize);
+}
+
+inline Region* Region::createCustomSize(size_t blockSize, size_t blockAlignment)
{
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockAlignment, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation))
+ CRASH();
+ return new Region(allocation, blockSize, 1);
}
-inline DeadBlock* DeadBlock::create(const PageAllocationAligned& allocation)
+inline Region::Region(PageAllocationAligned& allocation, size_t blockSize, size_t totalBlocks)
+ : DoublyLinkedListNode<Region>()
+ , m_allocation(allocation)
+ , m_totalBlocks(totalBlocks)
+ , m_blocksInUse(0)
+ , m_blockSize(blockSize)
+ , m_prev(0)
+ , m_next(0)
{
- return new(NotNull, allocation.base()) DeadBlock(allocation);
+ ASSERT(allocation);
+ char* start = static_cast<char*>(m_allocation.base());
+ char* end = start + m_allocation.size();
+ for (char* current = start; current < end; current += blockSize)
+ m_deadBlocks.append(new (NotNull, current) DeadBlock(this));
+}
+
+inline Region::~Region()
+{
+ ASSERT(isEmpty());
+ m_allocation.deallocate();
+}
+
+inline Region* Region::reset(size_t blockSize)
+{
+ ASSERT(isEmpty());
+ PageAllocationAligned allocation = m_allocation;
+ return new (NotNull, this) Region(allocation, blockSize, s_regionSize / blockSize);
+}
+
+inline DeadBlock* Region::allocate()
+{
+ ASSERT(!isFull());
+ m_blocksInUse++;
+ return m_deadBlocks.removeHead();
+}
+
+inline void Region::deallocate(void* base)
+{
+ ASSERT(base);
+ ASSERT(m_blocksInUse);
+ ASSERT(base >= m_allocation.base() && base < static_cast<char*>(m_allocation.base()) + m_allocation.size());
+ DeadBlock* block = new (NotNull, base) DeadBlock(this);
+ m_deadBlocks.push(block);
+ m_blocksInUse--;
}
class BlockAllocator {
@@ -61,8 +152,10 @@ public:
BlockAllocator();
~BlockAllocator();
- PageAllocationAligned allocate();
- void deallocate(PageAllocationAligned);
+ template <typename T> DeadBlock* allocate();
+ DeadBlock* allocateCustomSize(size_t blockSize, size_t blockAlignment);
+ template <typename T> void deallocate(T*);
+ template <typename T> void deallocateCustomSize(T*);
private:
void waitForRelativeTimeWhileHoldingLock(double relative);
@@ -71,42 +164,169 @@ private:
void blockFreeingThreadMain();
static void blockFreeingThreadStartFunc(void* heap);
- void releaseFreeBlocks();
+ struct RegionSet {
+ RegionSet(size_t blockSize)
+ : m_numberOfPartialRegions(0)
+ , m_blockSize(blockSize)
+ {
+ }
+ DoublyLinkedList<Region> m_fullRegions;
+ DoublyLinkedList<Region> m_partialRegions;
+ size_t m_numberOfPartialRegions;
+ size_t m_blockSize;
+ };
+
+ DeadBlock* tryAllocateFromRegion(RegionSet&, DoublyLinkedList<Region>&, size_t&);
+
+ void releaseFreeRegions();
+
+ template <typename T> RegionSet& regionSetFor();
+
+ RegionSet m_copiedRegionSet;
+ RegionSet m_markedRegionSet;
+
+ DoublyLinkedList<Region> m_emptyRegions;
+ size_t m_numberOfEmptyRegions;
- DoublyLinkedList<DeadBlock> m_freeBlocks;
- size_t m_numberOfFreeBlocks;
bool m_isCurrentlyAllocating;
bool m_blockFreeingThreadShouldQuit;
- SpinLock m_freeBlockLock;
- Mutex m_freeBlockConditionLock;
- ThreadCondition m_freeBlockCondition;
+ SpinLock m_regionLock;
+ Mutex m_emptyRegionConditionLock;
+ ThreadCondition m_emptyRegionCondition;
ThreadIdentifier m_blockFreeingThread;
};
-inline PageAllocationAligned BlockAllocator::allocate()
+inline DeadBlock* BlockAllocator::tryAllocateFromRegion(RegionSet& set, DoublyLinkedList<Region>& regions, size_t& numberOfRegions)
{
+ if (numberOfRegions) {
+ ASSERT(!regions.isEmpty());
+ Region* region = regions.head();
+ ASSERT(!region->isFull());
+
+ if (region->isEmpty()) {
+ ASSERT(region == m_emptyRegions.head());
+ m_numberOfEmptyRegions--;
+ set.m_numberOfPartialRegions++;
+ region = m_emptyRegions.removeHead()->reset(set.m_blockSize);
+ set.m_partialRegions.push(region);
+ }
+
+ DeadBlock* block = region->allocate();
+
+ if (region->isFull()) {
+ set.m_numberOfPartialRegions--;
+ set.m_fullRegions.push(set.m_partialRegions.removeHead());
+ }
+
+ return block;
+ }
+ return 0;
+}
+
+template<typename T>
+inline DeadBlock* BlockAllocator::allocate()
+{
+ RegionSet& set = regionSetFor<T>();
+ DeadBlock* block;
+ m_isCurrentlyAllocating = true;
{
- SpinLockHolder locker(&m_freeBlockLock);
- m_isCurrentlyAllocating = true;
- if (m_numberOfFreeBlocks) {
- ASSERT(!m_freeBlocks.isEmpty());
- m_numberOfFreeBlocks--;
- return DeadBlock::destroy(m_freeBlocks.removeHead());
+ SpinLockHolder locker(&m_regionLock);
+ if ((block = tryAllocateFromRegion(set, set.m_partialRegions, set.m_numberOfPartialRegions)))
+ return block;
+ if ((block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions)))
+ return block;
+ }
+
+ Region* newRegion = Region::create(T::blockSize);
+
+ SpinLockHolder locker(&m_regionLock);
+ m_emptyRegions.push(newRegion);
+ m_numberOfEmptyRegions++;
+ block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions);
+ ASSERT(block);
+ return block;
+}
+
+inline DeadBlock* BlockAllocator::allocateCustomSize(size_t blockSize, size_t blockAlignment)
+{
+ size_t realSize = WTF::roundUpToMultipleOf(blockAlignment, blockSize);
+ Region* newRegion = Region::createCustomSize(realSize, blockAlignment);
+ DeadBlock* block = newRegion->allocate();
+ ASSERT(block);
+ return block;
+}
+
+template<typename T>
+inline void BlockAllocator::deallocate(T* block)
+{
+ RegionSet& set = regionSetFor<T>();
+ bool shouldWakeBlockFreeingThread = false;
+ {
+ SpinLockHolder locker(&m_regionLock);
+ Region* region = block->region();
+ ASSERT(!region->isEmpty());
+ if (region->isFull())
+ set.m_fullRegions.remove(region);
+ else {
+ set.m_partialRegions.remove(region);
+ set.m_numberOfPartialRegions--;
+ }
+
+ region->deallocate(block);
+
+ if (region->isEmpty()) {
+ m_emptyRegions.push(region);
+ shouldWakeBlockFreeingThread = !m_numberOfEmptyRegions;
+ m_numberOfEmptyRegions++;
+ } else {
+ set.m_partialRegions.push(region);
+ set.m_numberOfPartialRegions++;
}
}
- ASSERT(m_freeBlocks.isEmpty());
- PageAllocationAligned allocation = PageAllocationAligned::allocate(DeadBlock::s_blockSize, DeadBlock::s_blockSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation))
- CRASH();
- return allocation;
+ if (shouldWakeBlockFreeingThread) {
+ MutexLocker mutexLocker(m_emptyRegionConditionLock);
+ m_emptyRegionCondition.signal();
+ }
+}
+
+template<typename T>
+inline void BlockAllocator::deallocateCustomSize(T* block)
+{
+ Region* region = block->region();
+ region->deallocate(block);
+ delete region;
+}
+
+template <>
+inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopiedBlock>()
+{
+ return m_copiedRegionSet;
+}
+
+template <>
+inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkedBlock>()
+{
+ return m_markedRegionSet;
+}
+
+template <>
+inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopiedBlock> >()
+{
+ return m_copiedRegionSet;
+}
+
+template <>
+inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkedBlock> >()
+{
+ return m_markedRegionSet;
}
-inline void BlockAllocator::deallocate(PageAllocationAligned allocation)
+template <typename T>
+inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor()
{
- SpinLockHolder locker(&m_freeBlockLock);
- m_freeBlocks.push(DeadBlock::create(allocation));
- m_numberOfFreeBlocks++;
+ ASSERT_NOT_REACHED();
+ return *(RegionSet*)0;
}
} // namespace JSC
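
Note: Region's constructor above threads a free list of DeadBlocks through a single aligned allocation with placement new, so no per-block heap allocation ever happens. A self-contained sketch of that carving step (std::aligned_alloc, C++17, stands in for PageAllocationAligned):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct FreeBlock { FreeBlock* next; };

    // Carve 'size' bytes at 'base' into 'blockSize'-byte blocks and chain
    // them into a free list, the way Region seeds m_deadBlocks.
    FreeBlock* carve(char* base, size_t size, size_t blockSize)
    {
        FreeBlock* head = nullptr;
        FreeBlock** tail = &head;
        for (char* p = base; p + blockSize <= base + size; p += blockSize) {
            FreeBlock* block = new (p) FreeBlock{nullptr}; // placement new: reuse the mapping
            *tail = block;
            tail = &block->next;
        }
        return head;
    }

    int main()
    {
        const size_t regionSize = 64 * 1024;
        char* base = static_cast<char*>(std::aligned_alloc(regionSize, regionSize));
        FreeBlock* freeList = carve(base, regionSize, 4 * 1024); // 16 blocks
        (void)freeList;
        std::free(base);
    }
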
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
index ad5dbb46b..af36f55df 100644
--- a/Source/JavaScriptCore/heap/CopiedBlock.h
+++ b/Source/JavaScriptCore/heap/CopiedBlock.h
@@ -26,9 +26,12 @@
#ifndef CopiedBlock_h
#define CopiedBlock_h
+#include "BlockAllocator.h"
#include "HeapBlock.h"
#include "JSValue.h"
#include "JSValueInlineMethods.h"
+#include "Options.h"
+#include <wtf/Atomics.h>
namespace JSC {
@@ -38,8 +41,17 @@ class CopiedBlock : public HeapBlock<CopiedBlock> {
friend class CopiedSpace;
friend class CopiedAllocator;
public:
- static CopiedBlock* create(const PageAllocationAligned&);
- static CopiedBlock* createNoZeroFill(const PageAllocationAligned&);
+ static CopiedBlock* create(DeadBlock*);
+ static CopiedBlock* createNoZeroFill(DeadBlock*);
+
+ bool isPinned();
+
+ unsigned liveBytes();
+ void reportLiveBytes(unsigned);
+ void didSurviveGC();
+ bool didEvacuateBytes(unsigned);
+ bool shouldEvacuate();
+ bool canBeRecycled();
// The payload is the region of the block that is usable for allocations.
char* payload();
@@ -60,24 +72,28 @@ public:
size_t size();
size_t capacity();
+ static const size_t blockSize = 32 * KB;
+
private:
- CopiedBlock(const PageAllocationAligned&);
+ CopiedBlock(Region*);
void zeroFillWilderness(); // Can be called at any time to zero-fill to the end of the block.
size_t m_remaining;
uintptr_t m_isPinned;
+ unsigned m_liveBytes;
};
-inline CopiedBlock* CopiedBlock::createNoZeroFill(const PageAllocationAligned& allocation)
+inline CopiedBlock* CopiedBlock::createNoZeroFill(DeadBlock* block)
{
- return new(NotNull, allocation.base()) CopiedBlock(allocation);
+ Region* region = block->region();
+ return new(NotNull, block) CopiedBlock(region);
}
-inline CopiedBlock* CopiedBlock::create(const PageAllocationAligned& allocation)
+inline CopiedBlock* CopiedBlock::create(DeadBlock* block)
{
- CopiedBlock* block = createNoZeroFill(allocation);
- block->zeroFillWilderness();
- return block;
+ CopiedBlock* newBlock = createNoZeroFill(block);
+ newBlock->zeroFillWilderness();
+ return newBlock;
}
inline void CopiedBlock::zeroFillWilderness()
@@ -92,14 +108,73 @@ inline void CopiedBlock::zeroFillWilderness()
#endif
}
-inline CopiedBlock::CopiedBlock(const PageAllocationAligned& allocation)
- : HeapBlock<CopiedBlock>(allocation)
+inline CopiedBlock::CopiedBlock(Region* region)
+ : HeapBlock<CopiedBlock>(region)
, m_remaining(payloadCapacity())
, m_isPinned(false)
+ , m_liveBytes(0)
{
ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
}
+inline void CopiedBlock::reportLiveBytes(unsigned bytes)
+{
+#if ENABLE(PARALLEL_GC)
+ unsigned oldValue = 0;
+ unsigned newValue = 0;
+ do {
+ oldValue = m_liveBytes;
+ newValue = oldValue + bytes;
+ } while (!WTF::weakCompareAndSwap(&m_liveBytes, oldValue, newValue));
+#else
+ m_liveBytes += bytes;
+#endif
+}
+
+inline void CopiedBlock::didSurviveGC()
+{
+ m_liveBytes = 0;
+ m_isPinned = false;
+}
+
+inline bool CopiedBlock::didEvacuateBytes(unsigned bytes)
+{
+ ASSERT(m_liveBytes >= bytes);
+#if ENABLE(PARALLEL_GC)
+ unsigned oldValue = 0;
+ unsigned newValue = 0;
+ do {
+ oldValue = m_liveBytes;
+ newValue = oldValue - bytes;
+ } while (!WTF::weakCompareAndSwap(&m_liveBytes, oldValue, newValue));
+ ASSERT(m_liveBytes < oldValue);
+ return !newValue;
+#else
+ m_liveBytes -= bytes;
+ return !m_liveBytes;
+#endif
+}
+
+inline bool CopiedBlock::canBeRecycled()
+{
+ return !m_liveBytes;
+}
+
+inline bool CopiedBlock::shouldEvacuate()
+{
+ return static_cast<double>(m_liveBytes) / static_cast<double>(payloadCapacity()) <= Options::minCopiedBlockUtilization();
+}
+
+inline bool CopiedBlock::isPinned()
+{
+ return m_isPinned;
+}
+
+inline unsigned CopiedBlock::liveBytes()
+{
+ return m_liveBytes;
+}
+
inline char* CopiedBlock::payload()
{
return reinterpret_cast<char*>(this) + ((sizeof(CopiedBlock) + 7) & ~7);
@@ -107,7 +182,7 @@ inline char* CopiedBlock::payload()
inline char* CopiedBlock::payloadEnd()
{
- return reinterpret_cast<char*>(this) + allocation().size();
+ return reinterpret_cast<char*>(this) + region()->blockSize();
}
inline size_t CopiedBlock::payloadCapacity()
@@ -152,7 +227,7 @@ inline size_t CopiedBlock::size()
inline size_t CopiedBlock::capacity()
{
- return allocation().size();
+ return region()->blockSize();
}
} // namespace JSC
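
Note: reportLiveBytes() and didEvacuateBytes() above let concurrent marking threads account live bytes per block without a lock: read the counter, compute the new value, retry on a failed weak compare-and-swap. The same loop expressed with std::atomic instead of WTF::weakCompareAndSwap:

    #include <atomic>

    std::atomic<unsigned> liveBytes{0};

    void reportLiveBytes(unsigned bytes)
    {
        unsigned oldValue = liveBytes.load(std::memory_order_relaxed);
        unsigned newValue;
        do {
            newValue = oldValue + bytes;
            // compare_exchange_weak may fail spuriously; on failure it
            // refreshes oldValue with the current counter and we retry.
        } while (!liveBytes.compare_exchange_weak(oldValue, newValue,
                                                  std::memory_order_relaxed));
    }
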
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index bf87a305c..cedafee3a 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -28,6 +28,7 @@
#include "CopiedSpaceInlineMethods.h"
#include "GCActivityCallback.h"
+#include "Options.h"
namespace JSC {
@@ -36,6 +37,7 @@ CopiedSpace::CopiedSpace(Heap* heap)
, m_toSpace(0)
, m_fromSpace(0)
, m_inCopyingPhase(false)
+ , m_shouldDoCopyPhase(false)
, m_numberOfLoanedBlocks(0)
{
m_toSpaceLock.Init();
@@ -50,7 +52,7 @@ CopiedSpace::~CopiedSpace()
m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_fromSpace->removeHead()));
while (!m_oversizeBlocks.isEmpty())
- CopiedBlock::destroy(m_oversizeBlocks.removeHead()).deallocate();
+ m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oversizeBlocks.removeHead()));
}
void CopiedSpace::init()
@@ -79,15 +81,7 @@ CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
ASSERT(isOversize(bytes));
- size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);
-
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation)) {
- *outPtr = 0;
- return false;
- }
-
- CopiedBlock* block = CopiedBlock::create(allocation);
+ CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, WTF::pageSize()));
m_oversizeBlocks.push(block);
m_blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
@@ -97,7 +91,7 @@ CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
*outPtr = allocator.forceAllocate(bytes);
allocator.resetCurrentBlock();
- m_heap->didAllocate(blockSize);
+ m_heap->didAllocate(block->region()->blockSize());
return true;
}
@@ -145,22 +139,25 @@ CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, si
CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
m_oversizeBlocks.remove(oldBlock);
m_blockSet.remove(oldBlock);
- CopiedBlock::destroy(oldBlock).deallocate();
+ m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock));
}
*ptr = newPtr;
return true;
}
-void CopiedSpace::doneFillingBlock(CopiedBlock* block)
+void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
{
ASSERT(m_inCopyingPhase);
+ if (exchange)
+ *exchange = allocateBlockForCopyingPhase();
+
if (!block)
return;
if (!block->dataSize()) {
- recycleBlock(block);
+ recycleBorrowedBlock(block);
return;
}
@@ -182,6 +179,38 @@ void CopiedSpace::doneFillingBlock(CopiedBlock* block)
}
}
+void CopiedSpace::startedCopying()
+{
+ std::swap(m_fromSpace, m_toSpace);
+
+ m_blockFilter.reset();
+ m_allocator.resetCurrentBlock();
+
+ CopiedBlock* next = 0;
+ size_t totalLiveBytes = 0;
+ size_t totalUsableBytes = 0;
+ for (CopiedBlock* block = m_fromSpace->head(); block; block = next) {
+ next = block->next();
+ if (!block->isPinned() && block->canBeRecycled()) {
+ recycleEvacuatedBlock(block);
+ continue;
+ }
+ totalLiveBytes += block->liveBytes();
+ totalUsableBytes += block->payloadCapacity();
+ }
+
+ double markedSpaceBytes = m_heap->objectSpace().capacity();
+ double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes);
+ m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization();
+ if (!m_shouldDoCopyPhase)
+ return;
+
+ ASSERT(m_shouldDoCopyPhase);
+ ASSERT(!m_inCopyingPhase);
+ ASSERT(!m_numberOfLoanedBlocks);
+ m_inCopyingPhase = true;
+}
+
void CopiedSpace::doneCopying()
{
{
@@ -190,12 +219,13 @@ void CopiedSpace::doneCopying()
m_loanedBlocksCondition.wait(m_loanedBlocksLock);
}
- ASSERT(m_inCopyingPhase);
+ ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
m_inCopyingPhase = false;
+
while (!m_fromSpace->isEmpty()) {
CopiedBlock* block = m_fromSpace->removeHead();
- if (block->m_isPinned) {
- block->m_isPinned = false;
+ if (block->isPinned() || !m_shouldDoCopyPhase) {
+ block->didSurviveGC();
// We don't add the block to the blockSet because it was never removed.
ASSERT(m_blockSet.contains(block));
m_blockFilter.add(reinterpret_cast<Bits>(block));
@@ -210,13 +240,13 @@ void CopiedSpace::doneCopying()
CopiedBlock* curr = m_oversizeBlocks.head();
while (curr) {
CopiedBlock* next = curr->next();
- if (!curr->m_isPinned) {
+ if (!curr->isPinned()) {
m_oversizeBlocks.remove(curr);
m_blockSet.remove(curr);
- CopiedBlock::destroy(curr).deallocate();
+ m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(curr));
} else {
m_blockFilter.add(reinterpret_cast<Bits>(curr));
- curr->m_isPinned = false;
+ curr->didSurviveGC();
}
curr = next;
}
@@ -225,6 +255,8 @@ void CopiedSpace::doneCopying()
allocateBlock();
else
m_allocator.setCurrentBlock(m_toSpace->head());
+
+ m_shouldDoCopyPhase = false;
}
size_t CopiedSpace::size()
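
Note: startedCopying() above only arms the copy phase when it pays for itself. Despite the name totalFragmentation, the quantity computed is a utilization ratio: live bytes over usable bytes, with marked-space capacity added to both sides so a large, well-utilized marked space damps the decision. Reduced to a function (the threshold comes from Options::minHeapUtilization(); any concrete value here would be illustrative):

    #include <cstddef>

    // Evacuate only when overall utilization drops to or below the
    // threshold, i.e. when enough copied-space bytes are dead or
    // fragmented to make copying worthwhile.
    bool shouldDoCopyPhase(size_t totalLiveBytes, size_t totalUsableBytes,
                           size_t markedSpaceBytes, double minHeapUtilization)
    {
        double utilization = (double(totalLiveBytes) + markedSpaceBytes)
                           / (double(totalUsableBytes) + markedSpaceBytes);
        return utilization <= minHeapUtilization;
    }
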
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
index e8a4f8724..3a698e8dc 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.h
@@ -46,6 +46,7 @@ class Heap;
class CopiedBlock;
class CopiedSpace {
+ friend class CopyVisitor;
friend class SlotVisitor;
friend class JIT;
public:
@@ -74,6 +75,7 @@ public:
size_t capacity();
bool isPagedOut(double deadline);
+ bool shouldDoCopyPhase() { return m_shouldDoCopyPhase; }
static CopiedBlock* blockFor(void*);
@@ -88,8 +90,9 @@ private:
void allocateBlock();
CopiedBlock* allocateBlockForCopyingPhase();
- void doneFillingBlock(CopiedBlock*);
- void recycleBlock(CopiedBlock*);
+ void doneFillingBlock(CopiedBlock*, CopiedBlock**);
+ void recycleEvacuatedBlock(CopiedBlock*);
+ void recycleBorrowedBlock(CopiedBlock*);
Heap* m_heap;
@@ -108,14 +111,15 @@ private:
DoublyLinkedList<CopiedBlock> m_oversizeBlocks;
bool m_inCopyingPhase;
+ bool m_shouldDoCopyPhase;
Mutex m_loanedBlocksLock;
ThreadCondition m_loanedBlocksCondition;
size_t m_numberOfLoanedBlocks;
- static const size_t s_maxAllocationSize = 32 * KB;
+ static const size_t s_maxAllocationSize = CopiedBlock::blockSize / 2;
static const size_t s_initialBlockNum = 16;
- static const size_t s_blockMask = ~(CopiedBlock::s_blockSize - 1);
+ static const size_t s_blockMask = ~(CopiedBlock::blockSize - 1);
};
} // namespace JSC
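
Note: s_blockMask above depends on two invariants: CopiedBlock::blockSize is a power of two, and the BlockAllocator hands out blocks on blockSize boundaries (the 64 KB regions are themselves size-aligned). Given those, the owning block of any interior pointer falls out of a mask, which is what keeps CopiedSpace::blockFor() cheap on hot paths:

    #include <cstddef>
    #include <cstdint>

    const size_t blockSize = 32 * 1024;                     // power of two
    const uintptr_t blockMask = ~(uintptr_t(blockSize) - 1);

    // Recover the block header from any pointer into its payload.
    inline void* blockFor(void* ptr)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) & blockMask);
    }
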
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
index 790a302de..01e816793 100644
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
@@ -93,19 +93,20 @@ inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
pin(block);
}
-inline void CopiedSpace::startedCopying()
+inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block)
{
- std::swap(m_fromSpace, m_toSpace);
-
- m_blockFilter.reset();
- m_allocator.resetCurrentBlock();
-
- ASSERT(!m_inCopyingPhase);
- ASSERT(!m_numberOfLoanedBlocks);
- m_inCopyingPhase = true;
+ ASSERT(block);
+ ASSERT(block->canBeRecycled());
+ ASSERT(!block->m_isPinned);
+ {
+ SpinLockHolder locker(&m_toSpaceLock);
+ m_blockSet.remove(block);
+ m_fromSpace->remove(block);
+ }
+ m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
}
-inline void CopiedSpace::recycleBlock(CopiedBlock* block)
+inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
@@ -121,7 +122,7 @@ inline void CopiedSpace::recycleBlock(CopiedBlock* block)
inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
{
ASSERT(m_inCopyingPhase);
- CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate());
+ CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate<CopiedBlock>());
{
MutexLocker locker(m_loanedBlocksLock);
@@ -139,7 +140,7 @@ inline void CopiedSpace::allocateBlock()
m_allocator.resetCurrentBlock();
- CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate());
+ CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());
m_toSpace->push(block);
m_blockFilter.add(reinterpret_cast<Bits>(block));
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.cpp b/Source/JavaScriptCore/heap/CopyVisitor.cpp
new file mode 100644
index 000000000..ae826f0d2
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopyVisitor.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CopyVisitor.h"
+
+#include "CopyVisitorInlineMethods.h"
+#include "GCThreadSharedData.h"
+#include "JSCell.h"
+#include "JSObject.h"
+#include <wtf/Threading.h>
+
+namespace JSC {
+
+CopyVisitor::CopyVisitor(GCThreadSharedData& shared)
+ : m_shared(shared)
+{
+}
+
+void CopyVisitor::copyFromShared()
+{
+ GCCopyPhaseFunctor functor(*this);
+ Vector<MarkedBlock*>& blocksToCopy = m_shared.m_blocksToCopy;
+ size_t startIndex, endIndex;
+
+ m_shared.getNextBlocksToCopy(startIndex, endIndex);
+ while (startIndex < endIndex) {
+ for (size_t i = startIndex; i < endIndex; i++)
+ blocksToCopy[i]->forEachLiveCell(functor);
+ m_shared.getNextBlocksToCopy(startIndex, endIndex);
+ }
+ ASSERT(startIndex == endIndex);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.h b/Source/JavaScriptCore/heap/CopyVisitor.h
new file mode 100644
index 000000000..45a2e0ad9
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopyVisitor.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopyVisitor_h
+#define CopyVisitor_h
+
+#include "CopiedSpace.h"
+
+namespace JSC {
+
+class GCThreadSharedData;
+
+class CopyVisitor {
+public:
+ CopyVisitor(GCThreadSharedData&);
+
+ void copyFromShared();
+
+ void startCopying();
+ void doneCopying();
+
+ // Low-level API for copying, appropriate for cases where the object's heap references
+ // are discontiguous or if the object occurs frequently enough that you need to focus on
+ // performance. Use this with care as it is easy to shoot yourself in the foot.
+ bool checkIfShouldCopy(void*, size_t);
+ void* allocateNewSpace(size_t);
+ void didCopy(void*, size_t);
+
+private:
+ void* allocateNewSpaceSlow(size_t);
+
+ GCThreadSharedData& m_shared;
+ CopiedAllocator m_copiedAllocator;
+};
+
+} // namespace JSC
+
+#endif
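
Note: a hedged sketch of how a copyBackingStore-style callback would drive the low-level API declared above. The single 'storage' pointer is an illustrative stand-in for a real JSObject backing store, and Visitor is templated so the sketch compiles on its own:

    #include <cstddef>
    #include <cstring>

    template<typename Visitor>
    void copyStorage(Visitor& visitor, void*& storage, size_t bytes)
    {
        void* oldStorage = storage;
        if (!visitor.checkIfShouldCopy(oldStorage, bytes))
            return;                          // pinned or oversize: leave it in place
        void* newStorage = visitor.allocateNewSpace(bytes);
        std::memcpy(newStorage, oldStorage, bytes);
        storage = newStorage;                // repoint the object at the evacuated copy
        visitor.didCopy(oldStorage, bytes);  // credit the old block; it can be
                                             // recycled once its live bytes hit zero
    }
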
diff --git a/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h b/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h
new file mode 100644
index 000000000..73400750f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopyVisitorInlineMethods_h
+#define CopyVisitorInlineMethods_h
+
+#include "ClassInfo.h"
+#include "CopyVisitor.h"
+#include "GCThreadSharedData.h"
+#include "JSCell.h"
+#include "JSDestructibleObject.h"
+
+namespace JSC {
+
+class GCCopyPhaseFunctor : public MarkedBlock::VoidFunctor {
+public:
+ GCCopyPhaseFunctor(CopyVisitor& visitor)
+ : m_visitor(visitor)
+ {
+ }
+
+ void operator()(JSCell* cell)
+ {
+ Structure* structure = cell->structure();
+ if (!structure->outOfLineCapacity() && !hasIndexedProperties(structure->indexingType()))
+ return;
+ ASSERT(structure->classInfo()->methodTable.copyBackingStore == JSObject::copyBackingStore);
+ JSObject::copyBackingStore(cell, m_visitor);
+ }
+
+private:
+ CopyVisitor& m_visitor;
+};
+
+inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr, size_t bytes)
+{
+ if (CopiedSpace::isOversize(bytes)) {
+ ASSERT(CopiedSpace::oversizeBlockFor(oldPtr)->isPinned());
+ return false;
+ }
+
+ if (CopiedSpace::blockFor(oldPtr)->isPinned())
+ return false;
+
+ return true;
+}
+
+inline void* CopyVisitor::allocateNewSpace(size_t bytes)
+{
+ void* result = 0; // Compilers don't realize that this will be assigned.
+ if (LIKELY(m_copiedAllocator.tryAllocate(bytes, &result)))
+ return result;
+
+ result = allocateNewSpaceSlow(bytes);
+ ASSERT(result);
+ return result;
+}
+
+inline void* CopyVisitor::allocateNewSpaceSlow(size_t bytes)
+{
+ CopiedBlock* newBlock = 0;
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &newBlock);
+ m_copiedAllocator.setCurrentBlock(newBlock);
+
+ void* result = 0;
+ CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
+ ASSERT(didSucceed);
+ return result;
+}
+
+inline void CopyVisitor::startCopying()
+{
+ ASSERT(!m_copiedAllocator.isValid());
+ CopiedBlock* block = 0;
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &block);
+ m_copiedAllocator.setCurrentBlock(block);
+}
+
+inline void CopyVisitor::doneCopying()
+{
+ if (!m_copiedAllocator.isValid())
+ return;
+
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), 0);
+}
+
+inline void CopyVisitor::didCopy(void* ptr, size_t bytes)
+{
+ ASSERT(!CopiedSpace::isOversize(bytes));
+ CopiedBlock* block = CopiedSpace::blockFor(ptr);
+ ASSERT(!block->isPinned());
+
+ if (block->didEvacuateBytes(bytes))
+ m_shared.m_copiedSpace->recycleEvacuatedBlock(block);
+}
+
+} // namespace JSC
+
+#endif
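
Note: allocateNewSpace() above splits into an inline fast path and a slow path that trades the full block for a fresh one via doneFillingBlock(). The fast path lands in CopiedAllocator::tryAllocate(), which is outside this diff; conceptually it is a bump allocator over the current block's payload, roughly:

    #include <cstddef>

    struct BumpAllocator {
        char* current = nullptr;   // next free byte in the block's payload
        char* end = nullptr;       // one past the end of the payload

        // Returns false when the block is exhausted, sending the caller
        // to the slow path that swaps in a fresh CopiedBlock.
        bool tryAllocate(size_t bytes, void** result)
        {
            bytes = (bytes + 7) & ~size_t(7);  // keep 8-byte alignment, as CopiedSpace does
            if (bytes > size_t(end - current))
                return false;
            *result = current;
            current += bytes;
            return true;
        }
    };
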
diff --git a/Source/JavaScriptCore/heap/GCThread.cpp b/Source/JavaScriptCore/heap/GCThread.cpp
new file mode 100644
index 000000000..ea43456bd
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCThread.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCThread.h"
+
+#include "CopyVisitor.h"
+#include "CopyVisitorInlineMethods.h"
+#include "GCThreadSharedData.h"
+#include "SlotVisitor.h"
+#include <wtf/MainThread.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC {
+
+GCThread::GCThread(GCThreadSharedData& shared, SlotVisitor* slotVisitor, CopyVisitor* copyVisitor)
+ : m_threadID(0)
+ , m_shared(shared)
+ , m_slotVisitor(WTF::adoptPtr(slotVisitor))
+ , m_copyVisitor(WTF::adoptPtr(copyVisitor))
+{
+}
+
+ThreadIdentifier GCThread::threadID()
+{
+ ASSERT(m_threadID);
+ return m_threadID;
+}
+
+void GCThread::initializeThreadID(ThreadIdentifier threadID)
+{
+ ASSERT(!m_threadID);
+ m_threadID = threadID;
+}
+
+SlotVisitor* GCThread::slotVisitor()
+{
+ ASSERT(m_slotVisitor);
+ return m_slotVisitor.get();
+}
+
+CopyVisitor* GCThread::copyVisitor()
+{
+ ASSERT(m_copyVisitor);
+ return m_copyVisitor.get();
+}
+
+GCPhase GCThread::waitForNextPhase()
+{
+ MutexLocker locker(m_shared.m_phaseLock);
+ while (m_shared.m_currentPhase == NoPhase)
+ m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+ return m_shared.m_currentPhase;
+}
+
+void GCThread::gcThreadMain()
+{
+ GCPhase currentPhase;
+#if ENABLE(PARALLEL_GC)
+ WTF::registerGCThread();
+#endif
+ // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before
+ // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
+ {
+ MutexLocker locker(m_shared.m_markingLock);
+ }
+ {
+ ParallelModeEnabler enabler(*m_slotVisitor);
+ while ((currentPhase = waitForNextPhase()) != Exit) {
+ // Note: Each phase is responsible for its own termination conditions. The comments below describe
+ // how each phase reaches termination.
+ switch (currentPhase) {
+ case Mark:
+ m_slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
+ // GCThreads only return from drainFromShared() if the main thread sets the m_parallelMarkersShouldExit
+ // flag in the GCThreadSharedData. The only way the main thread sets that flag is if it realizes
+ // that all of the various subphases in Heap::markRoots() have been fully finished and there is
+ // no more marking work to do and all of the GCThreads are idle, meaning no more work can be generated.
+ break;
+ case Copy:
+ // We don't have to call startCopying() because it's called for us on the main thread to avoid a
+ // race condition.
+ m_copyVisitor->copyFromShared();
+ // We know we're done copying when we return from copyFromShared() because we would
+ // only do so if there were no more chunks of copying work left to do. When there is no
+ // more copying work to do, the main thread will wait in CopiedSpace::doneCopying() until
+ // all of the blocks that the GCThreads borrowed have been returned. doneCopying()
+ // returns our borrowed CopiedBlock, allowing the copying phase to finish.
+ m_copyVisitor->doneCopying();
+ break;
+ case NoPhase:
+ ASSERT_NOT_REACHED();
+ break;
+ case Exit:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ }
+}
+
+void GCThread::gcThreadStartFunc(void* data)
+{
+ GCThread* thread = static_cast<GCThread*>(data);
+ thread->gcThreadMain();
+}
+
+} // namespace JSC
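
Note: gcThreadMain() above is a phase loop: each worker parks on m_phaseCondition, runs the announced phase, and parks again, while the didStart*/didFinish* calls in GCThreadSharedData flip m_currentPhase and broadcast. A compact standalone model of that handshake with std:: primitives; the patch's extra completion synchronization (m_markingLock, loaned-block accounting) is deliberately elided:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    enum GCPhase { NoPhase, Mark, Copy, Exit };

    std::mutex phaseLock;
    std::condition_variable phaseCondition;
    GCPhase currentPhase = NoPhase;

    void setPhase(GCPhase phase)                  // main thread: didStart*/didFinish*
    {
        std::lock_guard<std::mutex> lock(phaseLock);
        currentPhase = phase;
        phaseCondition.notify_all();
    }

    GCPhase waitForPhaseChange(GCPhase lastPhase) // worker side
    {
        std::unique_lock<std::mutex> lock(phaseLock);
        phaseCondition.wait(lock, [&] { return currentPhase != lastPhase; });
        return currentPhase;
    }

    void gcWorkerMain()
    {
        GCPhase phase = NoPhase;
        while ((phase = waitForPhaseChange(phase)) != Exit) {
            switch (phase) {
            case Mark:    /* drain the shared mark stack until told to stop */ break;
            case Copy:    /* claim chunks of the block snapshot and evacuate */ break;
            case NoPhase: break;                  // previous phase ended; wait again
            default:      break;
            }
        }
    }

    int main()
    {
        std::thread worker(gcWorkerMain);
        setPhase(Mark); setPhase(NoPhase);        // one marking phase
        setPhase(Copy); setPhase(NoPhase);        // one copying phase
        setPhase(Exit);
        worker.join();
    }
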
diff --git a/Source/JavaScriptCore/heap/GCThread.h b/Source/JavaScriptCore/heap/GCThread.h
new file mode 100644
index 000000000..0d218f975
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCThread.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCThread_h
+#define GCThread_h
+
+#include <GCThreadSharedData.h>
+#include <wtf/Deque.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/Threading.h>
+
+namespace JSC {
+
+class CopyVisitor;
+class GCThreadSharedData;
+class SlotVisitor;
+
+class GCThread {
+public:
+ GCThread(GCThreadSharedData&, SlotVisitor*, CopyVisitor*);
+
+ SlotVisitor* slotVisitor();
+ CopyVisitor* copyVisitor();
+ ThreadIdentifier threadID();
+ void initializeThreadID(ThreadIdentifier);
+
+ static void gcThreadStartFunc(void*);
+
+private:
+ void gcThreadMain();
+ GCPhase waitForNextPhase();
+
+ ThreadIdentifier m_threadID;
+ GCThreadSharedData& m_shared;
+ OwnPtr<SlotVisitor> m_slotVisitor;
+ OwnPtr<CopyVisitor> m_copyVisitor;
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
index 23a6b97a1..d9946d589 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
@@ -26,45 +26,30 @@
#include "config.h"
#include "GCThreadSharedData.h"
+#include "CopyVisitor.h"
+#include "CopyVisitorInlineMethods.h"
+#include "GCThread.h"
#include "JSGlobalData.h"
#include "MarkStack.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlineMethods.h"
-#include <wtf/MainThread.h>
namespace JSC {
#if ENABLE(PARALLEL_GC)
void GCThreadSharedData::resetChildren()
{
- for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
- m_markingThreadsMarkStack[i]->reset();
+ for (size_t i = 0; i < m_gcThreads.size(); ++i)
+ m_gcThreads[i]->slotVisitor()->reset();
}
size_t GCThreadSharedData::childVisitCount()
{
unsigned long result = 0;
- for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
- result += m_markingThreadsMarkStack[i]->visitCount();
+ for (unsigned i = 0; i < m_gcThreads.size(); ++i)
+ result += m_gcThreads[i]->slotVisitor()->visitCount();
return result;
}
-
-void GCThreadSharedData::markingThreadMain(SlotVisitor* slotVisitor)
-{
- WTF::registerGCThread();
- {
- ParallelModeEnabler enabler(*slotVisitor);
- slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
- }
- delete slotVisitor;
-}
-
-void GCThreadSharedData::markingThreadStartFunc(void* myVisitor)
-{
- SlotVisitor* slotVisitor = static_cast<SlotVisitor*>(myVisitor);
-
- slotVisitor->sharedData().markingThreadMain(slotVisitor);
-}
#endif
GCThreadSharedData::GCThreadSharedData(JSGlobalData* globalData)
@@ -74,13 +59,21 @@ GCThreadSharedData::GCThreadSharedData(JSGlobalData* globalData)
, m_sharedMarkStack(m_segmentAllocator)
, m_numberOfActiveParallelMarkers(0)
, m_parallelMarkersShouldExit(false)
+ , m_blocksToCopy(globalData->heap.m_blockSnapshot)
+ , m_copyIndex(0)
+ , m_currentPhase(NoPhase)
{
+ m_copyLock.Init();
#if ENABLE(PARALLEL_GC)
+ // Grab the lock so the new GC threads can be properly initialized before they start running.
+ MutexLocker locker(m_markingLock);
for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
SlotVisitor* slotVisitor = new SlotVisitor(*this);
- m_markingThreadsMarkStack.append(slotVisitor);
- m_markingThreads.append(createThread(markingThreadStartFunc, slotVisitor, "JavaScriptCore::Marking"));
- ASSERT(m_markingThreads.last());
+ CopyVisitor* copyVisitor = new CopyVisitor(*this);
+ GCThread* newThread = new GCThread(*this, slotVisitor, copyVisitor);
+ ThreadIdentifier threadID = createThread(GCThread::gcThreadStartFunc, newThread, "JavaScriptCore::Marking");
+ newThread->initializeThreadID(threadID);
+ m_gcThreads.append(newThread);
}
#endif
}
@@ -90,19 +83,22 @@ GCThreadSharedData::~GCThreadSharedData()
#if ENABLE(PARALLEL_GC)
// Destroy our marking threads.
{
- MutexLocker locker(m_markingLock);
+ MutexLocker markingLocker(m_markingLock);
+ MutexLocker phaseLocker(m_phaseLock);
+ ASSERT(m_currentPhase == NoPhase);
m_parallelMarkersShouldExit = true;
- m_markingCondition.broadcast();
+ m_currentPhase = Exit;
+ m_phaseCondition.broadcast();
+ }
+ for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
+ waitForThreadCompletion(m_gcThreads[i]->threadID());
+ delete m_gcThreads[i];
}
- for (unsigned i = 0; i < m_markingThreads.size(); ++i)
- waitForThreadCompletion(m_markingThreads[i]);
#endif
}
void GCThreadSharedData::reset()
{
- ASSERT(!m_numberOfActiveParallelMarkers);
- ASSERT(!m_parallelMarkersShouldExit);
ASSERT(m_sharedMarkStack.isEmpty());
#if ENABLE(PARALLEL_GC)
@@ -119,4 +115,53 @@ void GCThreadSharedData::reset()
}
}
+void GCThreadSharedData::didStartMarking()
+{
+ MutexLocker markingLocker(m_markingLock);
+ MutexLocker phaseLocker(m_phaseLock);
+ ASSERT(m_currentPhase == NoPhase);
+ m_currentPhase = Mark;
+ m_parallelMarkersShouldExit = false;
+ m_phaseCondition.broadcast();
+}
+
+void GCThreadSharedData::didFinishMarking()
+{
+ MutexLocker markingLocker(m_markingLock);
+ MutexLocker phaseLocker(m_phaseLock);
+ ASSERT(m_currentPhase == Mark);
+ m_currentPhase = NoPhase;
+ m_parallelMarkersShouldExit = true;
+ m_markingCondition.broadcast();
+}
+
+void GCThreadSharedData::didStartCopying()
+{
+ {
+ SpinLockHolder locker(&m_copyLock);
+ m_blocksToCopy = m_globalData->heap.m_blockSnapshot;
+ m_copyIndex = 0;
+ }
+
+ // We do this here so that we avoid a race condition where the main thread can
+ // blow through all of the copying work before the GCThreads fully wake up.
+ // The GCThreads then request a block from the CopiedSpace when the copying phase
+ // has completed, which isn't allowed.
+ for (size_t i = 0; i < m_gcThreads.size(); i++)
+ m_gcThreads[i]->copyVisitor()->startCopying();
+
+ MutexLocker locker(m_phaseLock);
+ ASSERT(m_currentPhase == NoPhase);
+ m_currentPhase = Copy;
+ m_phaseCondition.broadcast();
+}
+
+void GCThreadSharedData::didFinishCopying()
+{
+ MutexLocker locker(m_phaseLock);
+ ASSERT(m_currentPhase == Copy);
+ m_currentPhase = NoPhase;
+ m_phaseCondition.broadcast();
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
index 3f09a2820..bd48d9263 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.h
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.h
@@ -28,16 +28,27 @@
#include "ListableHandler.h"
#include "MarkStack.h"
+#include "MarkedBlock.h"
#include "UnconditionalFinalizer.h"
#include "WeakReferenceHarvester.h"
#include <wtf/HashSet.h>
+#include <wtf/TCSpinLock.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
namespace JSC {
+class GCThread;
class JSGlobalData;
class CopiedSpace;
+class CopyVisitor;
+
+enum GCPhase {
+ NoPhase,
+ Mark,
+ Copy,
+ Exit
+};
class GCThreadSharedData {
public:
@@ -46,6 +57,11 @@ public:
void reset();
+ void didStartMarking();
+ void didFinishMarking();
+ void didStartCopying();
+ void didFinishCopying();
+
#if ENABLE(PARALLEL_GC)
void resetChildren();
size_t childVisitCount();
@@ -53,12 +69,11 @@ public:
#endif
private:
+ friend class GCThread;
friend class SlotVisitor;
+ friend class CopyVisitor;
-#if ENABLE(PARALLEL_GC)
- void markingThreadMain(SlotVisitor*);
- static void markingThreadStartFunc(void* heap);
-#endif
+ void getNextBlocksToCopy(size_t&, size_t&);
JSGlobalData* m_globalData;
CopiedSpace* m_copiedSpace;
@@ -67,9 +82,8 @@ private:
bool m_shouldHashConst;
- Vector<ThreadIdentifier> m_markingThreads;
- Vector<SlotVisitor*> m_markingThreadsMarkStack;
-
+ Vector<GCThread*> m_gcThreads;
+
Mutex m_markingLock;
ThreadCondition m_markingCondition;
MarkStackArray m_sharedMarkStack;
@@ -79,10 +93,27 @@ private:
Mutex m_opaqueRootsLock;
HashSet<void*> m_opaqueRoots;
+ SpinLock m_copyLock;
+ Vector<MarkedBlock*>& m_blocksToCopy;
+ size_t m_copyIndex;
+ static const size_t s_blockFragmentLength = 32;
+
+ Mutex m_phaseLock;
+ ThreadCondition m_phaseCondition;
+ GCPhase m_currentPhase;
+
ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
};
+inline void GCThreadSharedData::getNextBlocksToCopy(size_t& start, size_t& end)
+{
+ SpinLockHolder locker(&m_copyLock);
+ start = m_copyIndex;
+ end = std::min(m_blocksToCopy.size(), m_copyIndex + s_blockFragmentLength);
+ m_copyIndex = end;
+}
+
} // namespace JSC
#endif
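
Note: getNextBlocksToCopy() above is the whole load balancer for the copy phase: workers pull fixed-length [start, end) slices of the block snapshot from behind a lock until the index reaches the end. Paired with the consumer loop from CopyVisitor::copyFromShared(), condensed (a std::mutex stands in for the patch's SpinLock):

    #include <algorithm>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct MarkedBlock;                     // placeholder for the real block type

    std::mutex copyLock;
    size_t copyIndex = 0;
    const size_t blockFragmentLength = 32;  // s_blockFragmentLength in the patch

    // Shared side: hand out the next slice of the snapshot.
    void getNextBlocksToCopy(size_t total, size_t& start, size_t& end)
    {
        std::lock_guard<std::mutex> lock(copyLock);
        start = copyIndex;
        end = std::min(total, copyIndex + blockFragmentLength);
        copyIndex = end;
    }

    // Worker side: keep claiming chunks until an empty slice comes back.
    void copyFromShared(std::vector<MarkedBlock*>& blocks)
    {
        size_t start, end;
        getNextBlocksToCopy(blocks.size(), start, end);
        while (start < end) {
            for (size_t i = start; i < end; i++) {
                // evacuate the live cells of blocks[i] here
            }
            getNextBlocksToCopy(blocks.size(), start, end);
        }
    }
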
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index ca936ebfc..772d85144 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -21,12 +21,14 @@
#include "config.h"
#include "Heap.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlineMethods.h"
#include "CodeBlock.h"
#include "ConservativeRoots.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
+#include "CopyVisitorInlineMethods.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
+#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
@@ -235,74 +237,6 @@ inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
return m_typeCountSet.release();
}
-class StorageStatistics : public MarkedBlock::VoidFunctor {
-public:
- StorageStatistics();
-
- void operator()(JSCell*);
-
- size_t objectWithOutOfLineStorageCount();
- size_t objectCount();
-
- size_t storageSize();
- size_t storageCapacity();
-
-private:
- size_t m_objectWithOutOfLineStorageCount;
- size_t m_objectCount;
- size_t m_storageSize;
- size_t m_storageCapacity;
-};
-
-inline StorageStatistics::StorageStatistics()
- : m_objectWithOutOfLineStorageCount(0)
- , m_objectCount(0)
- , m_storageSize(0)
- , m_storageCapacity(0)
-{
-}
-
-inline void StorageStatistics::operator()(JSCell* cell)
-{
- if (!cell->isObject())
- return;
-
- JSObject* object = jsCast<JSObject*>(cell);
- if (hasIndexedProperties(object->structure()->indexingType()))
- return;
-
- if (object->structure()->isUncacheableDictionary())
- return;
-
- ++m_objectCount;
- if (!object->hasInlineStorage())
- ++m_objectWithOutOfLineStorageCount;
- m_storageSize += object->structure()->totalStorageSize() * sizeof(WriteBarrierBase<Unknown>);
- m_storageCapacity += object->structure()->totalStorageCapacity() * sizeof(WriteBarrierBase<Unknown>);
-}
-
-inline size_t StorageStatistics::objectWithOutOfLineStorageCount()
-{
- return m_objectWithOutOfLineStorageCount;
-}
-
-inline size_t StorageStatistics::objectCount()
-{
- return m_objectCount;
-}
-
-
-inline size_t StorageStatistics::storageSize()
-{
- return m_storageSize;
-}
-
-
-inline size_t StorageStatistics::storageCapacity()
-{
- return m_storageCapacity;
-}
-
} // anonymous namespace
Heap::Heap(JSGlobalData* globalData, HeapType heapType)
@@ -319,6 +253,7 @@ Heap::Heap(JSGlobalData* globalData, HeapType heapType)
, m_machineThreads(this)
, m_sharedData(globalData)
, m_slotVisitor(m_sharedData)
+ , m_copyVisitor(m_sharedData)
, m_handleSet(globalData)
, m_isSafeToCollect(false)
, m_globalData(globalData)
@@ -422,7 +357,7 @@ void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
ProtectCountSet::iterator end = m_protectedValues.end();
for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- heapRootVisitor.visit(&it->first);
+ heapRootVisitor.visit(&it->key);
}
void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
@@ -462,19 +397,19 @@ void Heap::finalizeUnconditionalFinalizers()
m_slotVisitor.finalizeUnconditionalFinalizers();
}
-inline RegisterFile& Heap::registerFile()
+inline JSStack& Heap::stack()
{
- return m_globalData->interpreter->registerFile();
+ return m_globalData->interpreter->stack();
}
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
ASSERT(isValidThreadState(m_globalData));
- ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
- registerFile().gatherConservativeRoots(registerFileRoots);
- size_t registerFileRootCount = registerFileRoots.size();
- JSCell** registerRoots = registerFileRoots.roots();
- for (size_t i = 0; i < registerFileRootCount; i++) {
+ ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ stack().gatherConservativeRoots(stackRoots);
+ size_t stackRootCount = stackRoots.size();
+ JSCell** registerRoots = stackRoots.roots();
+ for (size_t i = 0; i < stackRootCount; i++) {
setMarked(registerRoots[i]);
roots.add(registerRoots[i]);
}
@@ -503,12 +438,12 @@ void Heap::markRoots(bool fullGC)
m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
}
- ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
m_dfgCodeBlocks.clearMarks();
{
- GCPHASE(GatherRegisterFileRoots);
- registerFile().gatherConservativeRoots(
- registerFileRoots, m_jitStubRoutines, m_dfgCodeBlocks);
+ GCPHASE(GatherStackRoots);
+ stack().gatherConservativeRoots(
+ stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
}
#if ENABLE(DFG_JIT)
@@ -531,7 +466,7 @@ void Heap::markRoots(bool fullGC)
m_objectSpace.clearMarks();
}
- m_storageSpace.startedCopying();
+ m_sharedData.didStartMarking();
SlotVisitor& visitor = m_slotVisitor;
visitor.setup();
HeapRootVisitor heapRootVisitor(visitor);
@@ -563,9 +498,9 @@ void Heap::markRoots(bool fullGC)
visitor.donateAndDrain();
}
{
- GCPHASE(VisitRegisterFileRoots);
- MARK_LOG_ROOT(visitor, "Register File");
- visitor.append(registerFileRoots);
+ GCPHASE(VisitStackRoots);
+ MARK_LOG_ROOT(visitor, "Stack");
+ visitor.append(stackRoots);
visitor.donateAndDrain();
}
#if ENABLE(DFG_JIT)
@@ -656,7 +591,7 @@ void Heap::markRoots(bool fullGC)
GCCOUNTER(VisitedValueCount, visitor.visitCount());
- visitor.doneCopying();
+ m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
@@ -670,7 +605,23 @@ void Heap::markRoots(bool fullGC)
m_sharedData.resetChildren();
#endif
m_sharedData.reset();
- m_storageSpace.doneCopying();
+}
+
+void Heap::copyBackingStores()
+{
+ m_storageSpace.startedCopying();
+ if (m_storageSpace.shouldDoCopyPhase()) {
+ m_sharedData.didStartCopying();
+ CopyVisitor& visitor = m_copyVisitor;
+ visitor.startCopying();
+ visitor.copyFromShared();
+ visitor.doneCopying();
+ // We need to wait for everybody to finish and return their CopiedBlocks
+ // before signaling that the phase is complete.
+ m_storageSpace.doneCopying();
+ m_sharedData.didFinishCopying();
+ } else
+ m_storageSpace.doneCopying();
}
size_t Heap::objectCount()
@@ -801,6 +752,14 @@ void Heap::collect(SweepToggle sweepToggle)
JAVASCRIPTCORE_GC_MARKED();
{
+ m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
+ MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
+ m_objectSpace.forEachBlock(functor);
+ }
+
+ copyBackingStores();
+
+ {
GCPHASE(FinalizeUnconditionalFinalizers);
finalizeUnconditionalFinalizers();
}
@@ -822,7 +781,7 @@ void Heap::collect(SweepToggle sweepToggle)
m_objectSpace.shrink();
}
- m_sweeper->startSweeping(m_objectSpace.blocks().set());
+ m_sweeper->startSweeping(m_blockSnapshot);
m_bytesAbandoned = 0;
{
@@ -831,6 +790,9 @@ void Heap::collect(SweepToggle sweepToggle)
}
size_t currentHeapSize = size();
+ if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
+ HeapStatistics::exitWithFailure();
+
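Together with the shouldCollect() change in Heap.h below, Options::gcMaxHeapSize() now acts as a hard cap: crossing it triggers a collection, and if the live size still exceeds the cap afterwards the process logs its statistics and exits. A standalone sketch of the two checks (field names are invented stand-ins for Heap's members):

    #include <cstddef>

    struct HeapCapModel {
        size_t gcMaxHeapSize;    // 0 means no cap configured
        size_t bytesAllocated;   // allocated since the last collection
        size_t sizeAfterCollect; // live size measured after a collection

        bool shouldCollect() const
        {
            return gcMaxHeapSize && bytesAllocated > gcMaxHeapSize;
        }

        bool mustExitWithFailure() const
        {
            return gcMaxHeapSize && sizeAfterCollect > gcMaxHeapSize;
        }
    };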
if (fullGC) {
m_sizeAfterLastCollect = currentHeapSize;
@@ -844,6 +806,8 @@ void Heap::collect(SweepToggle sweepToggle)
double lastGCEndTime = WTF::currentTime();
m_lastGCLength = lastGCEndTime - lastGCStartTime;
+ if (Options::recordGCPauseTimes())
+ HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
if (m_operationInProgress != Collection)
CRASH();
m_operationInProgress = NoOperation;
@@ -855,31 +819,8 @@ void Heap::collect(SweepToggle sweepToggle)
if (Options::objectsAreImmortal())
markDeadObjects();
- if (Options::showHeapStatistics())
- showStatistics();
-}
-
-void Heap::showStatistics()
-{
- dataLog("\n=== Heap Statistics: ===\n");
- dataLog("size: %ldkB\n", static_cast<long>(m_sizeAfterLastCollect / KB));
- dataLog("capacity: %ldkB\n", static_cast<long>(capacity() / KB));
- dataLog("pause time: %lfms\n\n", m_lastGCLength);
-
- StorageStatistics storageStatistics;
- m_objectSpace.forEachLiveCell(storageStatistics);
- dataLog("wasted .property storage: %ldkB (%ld%%)\n",
- static_cast<long>(
- (storageStatistics.storageCapacity() - storageStatistics.storageSize()) / KB),
- static_cast<long>(
- (storageStatistics.storageCapacity() - storageStatistics.storageSize()) * 100
- / storageStatistics.storageCapacity()));
- dataLog("objects with out-of-line .property storage: %ld (%ld%%)\n",
- static_cast<long>(
- storageStatistics.objectWithOutOfLineStorageCount()),
- static_cast<long>(
- storageStatistics.objectWithOutOfLineStorageCount() * 100
- / storageStatistics.objectCount()));
+ if (Options::showObjectStatistics())
+ HeapStatistics::showObjectStatistics(this);
}
void Heap::markDeadObjects()
@@ -942,11 +883,6 @@ void Heap::addCompiledCode(ExecutableBase* executable)
m_compiledCode.append(executable);
}
-bool Heap::isSafeToSweepStructures()
-{
- return !m_sweeper || m_sweeper->structuresCanBeSwept();
-}
-
void Heap::didStartVMShutdown()
{
m_activityCallback->didStartVMShutdown();
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
index 92efff7c5..88dc201a4 100644
--- a/Source/JavaScriptCore/heap/Heap.h
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -23,6 +23,7 @@
#define Heap_h
#include "BlockAllocator.h"
+#include "CopyVisitor.h"
#include "DFGCodeBlocks.h"
#include "GCThreadSharedData.h"
#include "HandleSet.h"
@@ -32,6 +33,7 @@
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include "MarkedSpace.h"
+#include "Options.h"
#include "SlotVisitor.h"
#include "WeakHandleOwner.h"
#include "WriteBarrierSupport.h"
@@ -54,11 +56,11 @@ namespace JSC {
class JITStubRoutine;
class JSCell;
class JSGlobalData;
+ class JSStack;
class JSValue;
class LiveObjectIterator;
class LLIntOffsetsExtractor;
class MarkedArgumentBuffer;
- class RegisterFile;
class WeakGCHandlePool;
class SlotVisitor;
@@ -112,7 +114,8 @@ namespace JSC {
MarkedAllocator& firstAllocatorWithoutDestructors() { return m_objectSpace.firstAllocator(); }
MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
- MarkedAllocator& allocatorForObjectWithDestructor(size_t bytes) { return m_objectSpace.destructorAllocatorFor(bytes); }
+ MarkedAllocator& allocatorForObjectWithNormalDestructor(size_t bytes) { return m_objectSpace.normalDestructorAllocatorFor(bytes); }
+ MarkedAllocator& allocatorForObjectWithImmortalStructureDestructor(size_t bytes) { return m_objectSpace.immortalStructureDestructorAllocatorFor(bytes); }
CopiedAllocator& storageAllocator() { return m_storageSpace.allocator(); }
CheckedBoolean tryAllocateStorage(size_t, void**);
CheckedBoolean tryReallocateStorage(void**, size_t, size_t);
@@ -169,7 +172,6 @@ namespace JSC {
void didAbandon(size_t);
bool isPagedOut(double deadline);
- bool isSafeToSweepStructures();
void didStartVMShutdown();
private:
@@ -181,13 +183,16 @@ namespace JSC {
friend class MarkedAllocator;
friend class MarkedBlock;
friend class CopiedSpace;
+ friend class CopyVisitor;
friend class SlotVisitor;
+ friend class IncrementalSweeper;
+ friend class HeapStatistics;
template<typename T> friend void* allocateCell(Heap&);
template<typename T> friend void* allocateCell(Heap&, size_t);
- void* allocateWithDestructor(size_t);
- void* allocateWithoutDestructor(size_t);
- void* allocateStructure(size_t);
+ void* allocateWithImmortalStructureDestructor(size_t); // For use with special objects whose Structures never die.
+ void* allocateWithNormalDestructor(size_t); // For use with objects that inherit directly or indirectly from JSDestructibleObject.
+ void* allocateWithoutDestructor(size_t); // For use with objects without destructors.
static const size_t minExtraCost = 256;
static const size_t maxExtraCost = 1024 * 1024;
@@ -202,13 +207,14 @@ namespace JSC {
void markRoots(bool fullGC);
void markProtectedObjects(HeapRootVisitor&);
void markTempSortVectors(HeapRootVisitor&);
+ void copyBackingStores();
void harvestWeakReferences();
void finalizeUnconditionalFinalizers();
void deleteUnmarkedCompiledCode();
void zombifyDeadObjects();
void markDeadObjects();
- RegisterFile& registerFile();
+ JSStack& stack();
BlockAllocator& blockAllocator();
const HeapType m_heapType;
@@ -237,6 +243,7 @@ namespace JSC {
GCThreadSharedData m_sharedData;
SlotVisitor m_slotVisitor;
+ CopyVisitor m_copyVisitor;
HandleSet m_handleSet;
HandleStack m_handleStack;
@@ -254,10 +261,26 @@ namespace JSC {
GCActivityCallback* m_activityCallback;
IncrementalSweeper* m_sweeper;
+ Vector<MarkedBlock*> m_blockSnapshot;
+ };
+
+ struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
+ MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
+ : m_index(0)
+ , m_blocks(blocks)
+ {
+ }
+
+ void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
+
+ size_t m_index;
+ Vector<MarkedBlock*>& m_blocks;
};
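
MarkedBlockSnapshotFunctor reuses the stateful-functor pattern of the CopyFunctor it replaces (removed from IncrementalSweeper.h below): the caller sizes the vector first, then the functor fills it by index as forEachBlock visits every block. The same shape in standalone form, with std::vector standing in for WTF::Vector:

    #include <cstddef>
    #include <vector>

    struct MarkedBlock; // opaque here

    struct SnapshotFunctor {
        explicit SnapshotFunctor(std::vector<MarkedBlock*>& blocks)
            : m_index(0)
            , m_blocks(blocks)
        {
        }

        // forEachBlock calls this once per block; the vector was resized
        // to the block count beforehand, so indexed stores stay in bounds.
        void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

        size_t m_index;
        std::vector<MarkedBlock*>& m_blocks;
    };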
inline bool Heap::shouldCollect()
{
+ if (Options::gcMaxHeapSize())
+ return m_bytesAllocated > Options::gcMaxHeapSize() && m_isSafeToCollect && m_operationInProgress == NoOperation;
#if ENABLE(GGC)
return m_objectSpace.nurseryWaterMark() >= m_minBytesPerCycle && m_isSafeToCollect && m_operationInProgress == NoOperation;
#else
@@ -351,7 +374,7 @@ namespace JSC {
{
ProtectCountSet::iterator end = m_protectedValues.end();
for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- functor(it->first);
+ functor(it->key);
m_handleSet.forEachStrongHandle(functor, m_protectedValues);
return functor.returnValue();
@@ -363,10 +386,16 @@ namespace JSC {
return forEachProtectedCell(functor);
}
- inline void* Heap::allocateWithDestructor(size_t bytes)
+ inline void* Heap::allocateWithNormalDestructor(size_t bytes)
+ {
+ ASSERT(isValidAllocation(bytes));
+ return m_objectSpace.allocateWithNormalDestructor(bytes);
+ }
+
+ inline void* Heap::allocateWithImmortalStructureDestructor(size_t bytes)
{
ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithDestructor(bytes);
+ return m_objectSpace.allocateWithImmortalStructureDestructor(bytes);
}
inline void* Heap::allocateWithoutDestructor(size_t bytes)
@@ -375,11 +404,6 @@ namespace JSC {
return m_objectSpace.allocateWithoutDestructor(bytes);
}
- inline void* Heap::allocateStructure(size_t bytes)
- {
- return m_objectSpace.allocateStructure(bytes);
- }
-
inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr)
{
return m_storageSpace.tryAllocate(bytes, outPtr);
diff --git a/Source/JavaScriptCore/heap/HeapBlock.h b/Source/JavaScriptCore/heap/HeapBlock.h
index a63b7ebe1..677eaacd4 100644
--- a/Source/JavaScriptCore/heap/HeapBlock.h
+++ b/Source/JavaScriptCore/heap/HeapBlock.h
@@ -27,13 +27,14 @@
#define HeapBlock_h
#include <wtf/DoublyLinkedList.h>
-#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
namespace JSC {
enum AllocationEffort { AllocationCanFail, AllocationMustSucceed };
+class Region;
+
#if COMPILER(GCC)
#define CLASS_IF_GCC class
#else
@@ -44,30 +45,25 @@ template<typename T>
class HeapBlock : public DoublyLinkedListNode<T> {
friend CLASS_IF_GCC DoublyLinkedListNode<T>;
public:
- static const size_t s_blockSize = 64 * KB;
-
- static PageAllocationAligned destroy(HeapBlock* block)
+ static HeapBlock* destroy(HeapBlock* block)
{
static_cast<T*>(block)->~T();
-
- PageAllocationAligned allocation;
- std::swap(allocation, block->m_allocation);
- return allocation;
+ return block;
}
- HeapBlock(const PageAllocationAligned& allocation)
+ HeapBlock(Region* region)
: DoublyLinkedListNode<T>()
- , m_allocation(allocation)
+ , m_region(region)
, m_prev(0)
, m_next(0)
{
- ASSERT(m_allocation);
+ ASSERT(m_region);
}
- const PageAllocationAligned allocation() const { return m_allocation; }
+ Region* region() const { return m_region; }
private:
- PageAllocationAligned m_allocation;
+ Region* m_region;
T* m_prev;
T* m_next;
};
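
destroy() now runs the derived destructor in place and returns the block pointer instead of extracting a PageAllocationAligned, so the caller can hand the still-mapped memory back to the BlockAllocator's Region free lists rather than unmapping it. A minimal CRTP sketch of that ownership shape (Region is opaque here):

    struct Region { };

    template<typename T>
    struct Block {
        explicit Block(Region* region) : m_region(region) { }

        static Block* destroy(Block* block)
        {
            static_cast<T*>(block)->~T(); // run the derived destructor in place
            return block;                 // storage stays alive for recycling
        }

        Region* m_region;
    };

    struct MyBlock : Block<MyBlock> {
        explicit MyBlock(Region* region) : Block<MyBlock>(region) { }
    };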
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.cpp b/Source/JavaScriptCore/heap/HeapStatistics.cpp
new file mode 100644
index 000000000..68044e0b3
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapStatistics.cpp
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapStatistics.h"
+
+#include "Heap.h"
+#include "JSObject.h"
+#include "Options.h"
+#include <stdlib.h>
+#if OS(UNIX)
+#include <sys/resource.h>
+#endif
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+#include <wtf/Deque.h>
+
+namespace JSC {
+
+double HeapStatistics::s_startTime = 0.0;
+double HeapStatistics::s_endTime = 0.0;
+Deque<double>* HeapStatistics::s_pauseTimeStarts = 0;
+Deque<double>* HeapStatistics::s_pauseTimeEnds = 0;
+
+#if OS(UNIX)
+
+void HeapStatistics::initialize()
+{
+ ASSERT(Options::recordGCPauseTimes());
+ s_startTime = WTF::monotonicallyIncreasingTime();
+ s_pauseTimeStarts = new Deque<double>();
+ s_pauseTimeEnds = new Deque<double>();
+}
+
+void HeapStatistics::recordGCPauseTime(double start, double end)
+{
+ ASSERT(Options::recordGCPauseTimes());
+ ASSERT(s_pauseTimeStarts);
+ ASSERT(s_pauseTimeEnds);
+ s_pauseTimeStarts->append(start);
+ s_pauseTimeEnds->append(end);
+}
+
+void HeapStatistics::logStatistics()
+{
+ struct rusage usage;
+ getrusage(RUSAGE_SELF, &usage);
+#if USE(CF) || OS(UNIX)
+ char* vmName = getenv("JSVMName");
+ char* suiteName = getenv("JSSuiteName");
+ char* benchmarkName = getenv("JSBenchmarkName");
+#else
+#error "The HeapStatistics module is not supported on this platform."
+#endif
+ if (!vmName || !suiteName || !benchmarkName)
+ dataLog("HeapStatistics: {\"max_rss\": %ld", usage.ru_maxrss);
+ else
+ dataLog("HeapStatistics: {\"max_rss\": %ld, \"vm_name\": \"%s\", \"suite_name\": \"%s\", \"benchmark_name\": \"%s\"",
+ usage.ru_maxrss, vmName, suiteName, benchmarkName);
+
+ if (Options::recordGCPauseTimes()) {
+ dataLog(", \"pause_times\": [");
+ Deque<double>::iterator startIt = s_pauseTimeStarts->begin();
+ Deque<double>::iterator endIt = s_pauseTimeEnds->begin();
+ if (startIt != s_pauseTimeStarts->end() && endIt != s_pauseTimeEnds->end()) {
+ dataLog("[%f, %f]", *startIt, *endIt);
+ ++startIt;
+ ++endIt;
+ }
+ while (startIt != s_pauseTimeStarts->end() && endIt != s_pauseTimeEnds->end()) {
+ dataLog(", [%f, %f]", *startIt, *endIt);
+ ++startIt;
+ ++endIt;
+ }
+ dataLog("], \"start_time\": %f, \"end_time\": %f", s_startTime, s_endTime);
+ }
+ dataLog("}\n");
+}
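
Given those format strings, a run with all three environment variables set and pause-time recording enabled emits one line shaped like this (values illustrative):

    HeapStatistics: {"max_rss": 102400, "vm_name": "JSC", "suite_name": "v8", "benchmark_name": "splay", "pause_times": [[1.250000, 1.270000], [2.500000, 2.530000]], "start_time": 0.000000, "end_time": 3.100000}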
+
+void HeapStatistics::exitWithFailure()
+{
+ ASSERT(Options::logHeapStatisticsAtExit());
+ s_endTime = WTF::monotonicallyIncreasingTime();
+ logStatistics();
+ exit(-1);
+}
+
+void HeapStatistics::reportSuccess()
+{
+ ASSERT(Options::logHeapStatisticsAtExit());
+ s_endTime = WTF::monotonicallyIncreasingTime();
+ logStatistics();
+}
+
+#else
+
+void HeapStatistics::initialize()
+{
+}
+
+void HeapStatistics::recordGCPauseTime(double, double)
+{
+}
+
+void HeapStatistics::logStatistics()
+{
+}
+
+void HeapStatistics::exitWithFailure()
+{
+}
+
+void HeapStatistics::reportSuccess()
+{
+}
+
+#endif // OS(UNIX)
+
+size_t HeapStatistics::usedJSHeap()
+{
+ JSGlobalData* globalData = &JSGlobalData::sharedInstance();
+ return globalData->heap.size();
+}
+
+size_t HeapStatistics::parseMemoryAmount(char* s)
+{
+ size_t multiplier = 1;
+ char* afterS;
+ size_t value = strtol(s, &afterS, 10);
+ char next = afterS[0];
+ switch (next) {
+ case 'K':
+ multiplier = KB;
+ break;
+ case 'M':
+ multiplier = MB;
+ break;
+ case 'G':
+ multiplier = GB;
+ break;
+ default:
+ break;
+ }
+ return value * multiplier;
+}
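
parseMemoryAmount() accepts a decimal count with an optional K, M, or G suffix; anything else falls through as raw bytes. This is the format memory-sized options such as gcMaxHeapSize are expected to use. A self-contained equivalent with a usage check:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    static size_t parseMemoryAmount(const char* s)
    {
        static const size_t KB = 1024;
        char* afterS;
        size_t value = std::strtol(s, &afterS, 10);
        switch (afterS[0]) {
        case 'K': return value * KB;
        case 'M': return value * KB * KB;
        case 'G': return value * KB * KB * KB;
        default: return value; // no suffix: plain bytes
        }
    }

    int main()
    {
        std::printf("%zu\n", parseMemoryAmount("512M")); // prints 536870912
        return 0;
    }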
+
+class StorageStatistics : public MarkedBlock::VoidFunctor {
+public:
+ StorageStatistics();
+
+ void operator()(JSCell*);
+
+ size_t objectWithOutOfLineStorageCount();
+ size_t objectCount();
+
+ size_t storageSize();
+ size_t storageCapacity();
+
+private:
+ size_t m_objectWithOutOfLineStorageCount;
+ size_t m_objectCount;
+ size_t m_storageSize;
+ size_t m_storageCapacity;
+};
+
+inline StorageStatistics::StorageStatistics()
+ : m_objectWithOutOfLineStorageCount(0)
+ , m_objectCount(0)
+ , m_storageSize(0)
+ , m_storageCapacity(0)
+{
+}
+
+inline void StorageStatistics::operator()(JSCell* cell)
+{
+ if (!cell->isObject())
+ return;
+
+ JSObject* object = jsCast<JSObject*>(cell);
+ if (hasIndexedProperties(object->structure()->indexingType()))
+ return;
+
+ if (object->structure()->isUncacheableDictionary())
+ return;
+
+ ++m_objectCount;
+ if (!object->hasInlineStorage())
+ ++m_objectWithOutOfLineStorageCount;
+ m_storageSize += object->structure()->totalStorageSize() * sizeof(WriteBarrierBase<Unknown>);
+ m_storageCapacity += object->structure()->totalStorageCapacity() * sizeof(WriteBarrierBase<Unknown>);
+}
+
+inline size_t StorageStatistics::objectWithOutOfLineStorageCount()
+{
+ return m_objectWithOutOfLineStorageCount;
+}
+
+inline size_t StorageStatistics::objectCount()
+{
+ return m_objectCount;
+}
+
+inline size_t StorageStatistics::storageSize()
+{
+ return m_storageSize;
+}
+
+inline size_t StorageStatistics::storageCapacity()
+{
+ return m_storageCapacity;
+}
+
+void HeapStatistics::showObjectStatistics(Heap* heap)
+{
+ dataLog("\n=== Heap Statistics: ===\n");
+ dataLog("size: %ldkB\n", static_cast<long>(heap->m_sizeAfterLastCollect / KB));
+ dataLog("capacity: %ldkB\n", static_cast<long>(heap->capacity() / KB));
+ dataLog("pause time: %lfms\n\n", heap->m_lastGCLength);
+
+ StorageStatistics storageStatistics;
+ heap->m_objectSpace.forEachLiveCell(storageStatistics);
+ dataLog("wasted .property storage: %ldkB (%ld%%)\n",
+ static_cast<long>(
+ (storageStatistics.storageCapacity() - storageStatistics.storageSize()) / KB),
+ static_cast<long>(
+ (storageStatistics.storageCapacity() - storageStatistics.storageSize()) * 100
+ / storageStatistics.storageCapacity()));
+ dataLog("objects with out-of-line .property storage: %ld (%ld%%)\n",
+ static_cast<long>(
+ storageStatistics.objectWithOutOfLineStorageCount()),
+ static_cast<long>(
+ storageStatistics.objectWithOutOfLineStorageCount() * 100
+ / storageStatistics.objectCount()));
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.h b/Source/JavaScriptCore/heap/HeapStatistics.h
new file mode 100644
index 000000000..34d05af7c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapStatistics.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HeapStatistics_h
+#define HeapStatistics_h
+
+#include <wtf/Deque.h>
+
+namespace JSC {
+
+class Heap;
+
+class HeapStatistics {
+public:
+ NO_RETURN static void exitWithFailure();
+ JS_EXPORT_PRIVATE static void reportSuccess();
+ JS_EXPORT_PRIVATE static size_t usedJSHeap();
+
+ static void initialize();
+ static void recordGCPauseTime(double start, double end);
+ static size_t parseMemoryAmount(char*);
+
+ static void showObjectStatistics(Heap*);
+
+ static const size_t KB = 1024;
+ static const size_t MB = 1024 * KB;
+ static const size_t GB = 1024 * MB;
+
+private:
+ static void logStatistics();
+ static Deque<double>* s_pauseTimeStarts;
+ static Deque<double>* s_pauseTimeEnds;
+ static double s_startTime;
+ static double s_endTime;
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
index bd1342f2a..4aec4dd51 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
@@ -48,7 +48,7 @@ static const double sweepTimeMultiplier = 1.0 / sweepTimeTotal;
IncrementalSweeper::IncrementalSweeper(Heap* heap, CFRunLoopRef runLoop)
: HeapTimer(heap->globalData(), runLoop)
, m_currentBlockToSweepIndex(0)
- , m_structuresCanBeSwept(false)
+ , m_blocksToSweep(heap->m_blockSnapshot)
{
}
@@ -72,7 +72,6 @@ void IncrementalSweeper::cancelTimer()
IncrementalSweeper::IncrementalSweeper(Heap* heap)
: HeapTimer(heap->globalData())
, m_currentBlockToSweepIndex(0)
- , m_structuresCanBeSwept(false)
{
}
@@ -119,10 +118,6 @@ void IncrementalSweeper::sweepNextBlock()
{
while (m_currentBlockToSweepIndex < m_blocksToSweep.size()) {
MarkedBlock* block = m_blocksToSweep[m_currentBlockToSweepIndex++];
- if (block->onlyContainsStructures())
- m_structuresCanBeSwept = true;
- else
- ASSERT(!m_structuresCanBeSwept);
if (!block->needsSweeping())
continue;
@@ -133,20 +128,16 @@ void IncrementalSweeper::sweepNextBlock()
}
}
-void IncrementalSweeper::startSweeping(const HashSet<MarkedBlock*>& blockSnapshot)
+void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>& blockSnapshot)
{
- m_blocksToSweep.resize(blockSnapshot.size());
- CopyFunctor functor(m_blocksToSweep);
- m_globalData->heap.objectSpace().forEachBlock(functor);
+ m_blocksToSweep = blockSnapshot;
m_currentBlockToSweepIndex = 0;
- m_structuresCanBeSwept = false;
scheduleTimer();
}
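
startSweeping() now just copies the snapshot taken by Heap::collect() and arms the timer; each timer fire then retires a bounded slice of blocks through sweepNextBlock(), skipping blocks that do not need sweeping. A condensed model of that driver with the timer replaced by a plain budget counter:

    #include <cstddef>
    #include <vector>

    struct Block {
        bool needsSweeping;
        void sweep() { needsSweeping = false; }
    };

    struct SweeperModel {
        std::vector<Block*> blocksToSweep;
        size_t currentIndex;

        // One "timer fire": sweep at most budget dirty blocks, then yield.
        void doWork(size_t budget)
        {
            while (budget && currentIndex < blocksToSweep.size()) {
                Block* block = blocksToSweep[currentIndex++];
                if (!block->needsSweeping)
                    continue;
                block->sweep();
                --budget;
            }
        }
    };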
void IncrementalSweeper::willFinishSweeping()
{
m_currentBlockToSweepIndex = 0;
- m_structuresCanBeSwept = true;
m_blocksToSweep.clear();
if (m_globalData)
cancelTimer();
@@ -156,7 +147,6 @@ void IncrementalSweeper::willFinishSweeping()
IncrementalSweeper::IncrementalSweeper(JSGlobalData* globalData)
: HeapTimer(globalData)
- , m_structuresCanBeSwept(false)
{
}
@@ -169,14 +159,12 @@ IncrementalSweeper* IncrementalSweeper::create(Heap* heap)
return new IncrementalSweeper(heap->globalData());
}
-void IncrementalSweeper::startSweeping(const HashSet<MarkedBlock*>&)
+void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>&)
{
- m_structuresCanBeSwept = false;
}
void IncrementalSweeper::willFinishSweeping()
{
- m_structuresCanBeSwept = true;
}
void IncrementalSweeper::sweepNextBlock()
@@ -185,9 +173,4 @@ void IncrementalSweeper::sweepNextBlock()
#endif
-bool IncrementalSweeper::structuresCanBeSwept()
-{
- return m_structuresCanBeSwept;
-}
-
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.h b/Source/JavaScriptCore/heap/IncrementalSweeper.h
index 03c620f9c..5b9267bc7 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.h
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.h
@@ -37,26 +37,12 @@ namespace JSC {
class Heap;
-struct CopyFunctor : public MarkedBlock::VoidFunctor {
- CopyFunctor(Vector<MarkedBlock*>& blocks)
- : m_index(0)
- , m_blocks(blocks)
- {
- }
-
- void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
-
- size_t m_index;
- Vector<MarkedBlock*>& m_blocks;
-};
-
class IncrementalSweeper : public HeapTimer {
public:
static IncrementalSweeper* create(Heap*);
- void startSweeping(const HashSet<MarkedBlock*>& blockSnapshot);
+ void startSweeping(Vector<MarkedBlock*>&);
virtual void doWork();
void sweepNextBlock();
- bool structuresCanBeSwept();
void willFinishSweeping();
private:
@@ -72,13 +58,12 @@ private:
void cancelTimer();
unsigned m_currentBlockToSweepIndex;
- Vector<MarkedBlock*> m_blocksToSweep;
+ Vector<MarkedBlock*>& m_blocksToSweep;
#else
IncrementalSweeper(JSGlobalData*);
#endif
- bool m_structuresCanBeSwept;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
index 5e4ca36e0..a37dc6f5c 100644
--- a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
+++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
@@ -82,7 +82,7 @@ void JITStubRoutineSet::markSlow(uintptr_t address)
if (iter == m_addressToRoutineMap.end())
return;
- iter->second->m_mayBeExecuting = true;
+ iter->value->m_mayBeExecuting = true;
}
void JITStubRoutineSet::deleteUnmarkedJettisonedStubRoutines()
@@ -97,7 +97,7 @@ void JITStubRoutineSet::deleteUnmarkedJettisonedStubRoutines()
uintptr_t step = JITStubRoutine::addressStep();
for (uintptr_t iter = start; iter < end; iter += step) {
ASSERT(m_addressToRoutineMap.find(iter) != m_addressToRoutineMap.end());
- ASSERT(m_addressToRoutineMap.find(iter)->second == routine);
+ ASSERT(m_addressToRoutineMap.find(iter)->value == routine);
m_addressToRoutineMap.remove(iter);
}
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index 8a7d02e21..466c9fffe 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -30,17 +30,6 @@ bool MarkedAllocator::isPagedOut(double deadline)
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
if (!m_freeList.head) {
- if (m_onlyContainsStructures && !m_heap->isSafeToSweepStructures()) {
- if (m_currentBlock) {
- m_currentBlock->didConsumeFreeList();
- m_currentBlock = 0;
- }
- // We sweep another random block here so that we can make progress
- // toward being able to sweep Structures.
- m_heap->sweeper()->sweepNextBlock();
- return 0;
- }
-
for (MarkedBlock*& block = m_blocksToSweep; block; block = block->next()) {
MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
if (!freeList.head) {
@@ -122,15 +111,9 @@ MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
- if (blockSize == MarkedBlock::blockSize) {
- PageAllocationAligned allocation = m_heap->blockAllocator().allocate();
- return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures);
- }
-
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, MarkedBlock::blockSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation))
- CRASH();
- return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures);
+ if (blockSize == MarkedBlock::blockSize)
+ return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
+ return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
}
void MarkedAllocator::addBlock(MarkedBlock* block)
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h
index f9cb6ae52..13bd8e493 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.h
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.h
@@ -23,8 +23,7 @@ public:
void reset();
void canonicalizeCellLivenessData();
size_t cellSize() { return m_cellSize; }
- bool cellsNeedDestruction() { return m_cellsNeedDestruction; }
- bool onlyContainsStructures() { return m_onlyContainsStructures; }
+ MarkedBlock::DestructorType destructorType() { return m_destructorType; }
void* allocate(size_t);
Heap* heap() { return m_heap; }
@@ -32,7 +31,7 @@ public:
void addBlock(MarkedBlock*);
void removeBlock(MarkedBlock*);
- void init(Heap*, MarkedSpace*, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures);
+ void init(Heap*, MarkedSpace*, size_t cellSize, MarkedBlock::DestructorType);
bool isPagedOut(double deadline);
@@ -49,8 +48,7 @@ private:
MarkedBlock* m_blocksToSweep;
DoublyLinkedList<MarkedBlock> m_blockList;
size_t m_cellSize;
- bool m_cellsNeedDestruction;
- bool m_onlyContainsStructures;
+ MarkedBlock::DestructorType m_destructorType;
Heap* m_heap;
MarkedSpace* m_markedSpace;
};
@@ -59,20 +57,18 @@ inline MarkedAllocator::MarkedAllocator()
: m_currentBlock(0)
, m_blocksToSweep(0)
, m_cellSize(0)
- , m_cellsNeedDestruction(true)
- , m_onlyContainsStructures(false)
+ , m_destructorType(MarkedBlock::None)
, m_heap(0)
, m_markedSpace(0)
{
}
-inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures)
+inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, MarkedBlock::DestructorType destructorType)
{
m_heap = heap;
m_markedSpace = markedSpace;
m_cellSize = cellSize;
- m_cellsNeedDestruction = cellsNeedDestruction;
- m_onlyContainsStructures = onlyContainsStructures;
+ m_destructorType = destructorType;
}
inline void* MarkedAllocator::allocate(size_t bytes)
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
index c345080fe..70a24b6ae 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.cpp
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -28,26 +28,27 @@
#include "IncrementalSweeper.h"
#include "JSCell.h"
-#include "JSObject.h"
+#include "JSDestructibleObject.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures)
+MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
- return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction, onlyContainsStructures);
+ Region* region = block->region();
+ return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}
-MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures)
- : HeapBlock<MarkedBlock>(allocation)
+MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+ : HeapBlock<MarkedBlock>(region)
, m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
- , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
- , m_cellsNeedDestruction(cellsNeedDestruction)
- , m_onlyContainsStructures(onlyContainsStructures)
+ , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
+ , m_destructorType(destructorType)
+ , m_allocator(allocator)
, m_state(New) // All cells start out unmarked.
- , m_weakSet(heap->globalData())
+ , m_weakSet(allocator->heap()->globalData())
{
- ASSERT(heap);
+ ASSERT(allocator);
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
@@ -65,11 +66,11 @@ inline void MarkedBlock::callDestructor(JSCell* cell)
cell->zap();
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
ASSERT(blockState != Allocated && blockState != FreeListed);
- ASSERT(destructorCallNeeded || sweepMode != SweepOnly);
+ ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
// This produces a free list that is ordered in reverse through the block.
// This is fine, since the allocation code makes no assumptions about the
@@ -82,7 +83,7 @@ MarkedBlock::FreeList MarkedBlock::specializedSweep()
JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (destructorCallNeeded && blockState != New)
+ if (dtorType != MarkedBlock::None && blockState != New)
callDestructor(cell);
if (sweepMode == SweepToFreeList) {
@@ -103,21 +104,23 @@ MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
m_weakSet.sweep();
- if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
+ if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
return FreeList();
- if (m_cellsNeedDestruction)
- return sweepHelper<true>(sweepMode);
- return sweepHelper<false>(sweepMode);
+ if (m_destructorType == MarkedBlock::ImmortalStructure)
+ return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
+ if (m_destructorType == MarkedBlock::Normal)
+ return sweepHelper<MarkedBlock::Normal>(sweepMode);
+ return sweepHelper<MarkedBlock::None>(sweepMode);
}
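
sweep() branches once on the runtime DestructorType and then jumps into a sweepHelper instantiation where the type is a compile-time constant, so the per-cell destructor check inside specializedSweep() folds away. The idiom in isolation:

    enum DestructorType { None, ImmortalStructure, Normal };

    // dtorType is a constant in each instantiation, so the branch in the
    // loop disappears after constant folding.
    template<DestructorType dtorType>
    static int sweepCells(int cellCount)
    {
        int destroyed = 0;
        for (int i = 0; i < cellCount; ++i) {
            if (dtorType != None)
                ++destroyed; // stand-in for callDestructor(cell)
        }
        return destroyed;
    }

    static int sweep(DestructorType type, int cellCount)
    {
        if (type == ImmortalStructure)
            return sweepCells<ImmortalStructure>(cellCount);
        if (type == Normal)
            return sweepCells<Normal>(cellCount);
        return sweepCells<None>(cellCount);
    }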
-template<bool destructorCallNeeded>
+template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
switch (m_state) {
case New:
ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
+ return specializedSweep<New, SweepToFreeList, dtorType>();
case FreeListed:
// Happens when a block transitions to fully allocated.
ASSERT(sweepMode == SweepToFreeList);
@@ -126,10 +129,9 @@ MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
ASSERT_NOT_REACHED();
return FreeList();
case Marked:
- ASSERT(!m_onlyContainsStructures || heap()->isSafeToSweepStructures());
return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
- : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
+ ? specializedSweep<Marked, SweepToFreeList, dtorType>()
+ : specializedSweep<Marked, SweepOnly, dtorType>();
}
ASSERT_NOT_REACHED();
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h
index 4b2a5fd53..31bf60b9f 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.h
+++ b/Source/JavaScriptCore/heap/MarkedBlock.h
@@ -22,6 +22,7 @@
#ifndef MarkedBlock_h
#define MarkedBlock_h
+#include "BlockAllocator.h"
#include "CardSet.h"
#include "HeapBlock.h"
@@ -52,6 +53,7 @@ namespace JSC {
class Heap;
class JSCell;
+ class MarkedAllocator;
typedef uintptr_t Bits;
@@ -112,7 +114,8 @@ namespace JSC {
ReturnType m_count;
};
- static MarkedBlock* create(const PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures);
+ enum DestructorType { None, ImmortalStructure, Normal };
+ static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);
static bool isAtomAligned(const void*);
static MarkedBlock* blockFor(const void*);
@@ -120,6 +123,7 @@ namespace JSC {
void lastChanceToFinalize();
+ MarkedAllocator* allocator() const;
Heap* heap() const;
JSGlobalData* globalData() const;
WeakSet& weakSet();
@@ -143,8 +147,7 @@ namespace JSC {
bool isEmpty();
size_t cellSize();
- bool cellsNeedDestruction();
- bool onlyContainsStructures();
+ DestructorType destructorType();
size_t size();
size_t capacity();
@@ -194,15 +197,15 @@ namespace JSC {
static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
enum BlockState { New, FreeListed, Allocated, Marked };
- template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);
+ template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);
typedef char Atom[atomSize];
- MarkedBlock(const PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures);
+ MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
Atom* atoms();
size_t atomNumber(const void*);
void callDestructor(JSCell*);
- template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();
+ template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();
#if ENABLE(GGC)
CardSet<bytesPerCard, blockSize> m_cards;
@@ -215,8 +218,8 @@ namespace JSC {
#else
WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
- bool m_cellsNeedDestruction;
- bool m_onlyContainsStructures;
+ DestructorType m_destructorType;
+ MarkedAllocator* m_allocator;
BlockState m_state;
WeakSet m_weakSet;
};
@@ -261,6 +264,11 @@ namespace JSC {
sweep();
}
+ inline MarkedAllocator* MarkedBlock::allocator() const
+ {
+ return m_allocator;
+ }
+
inline Heap* MarkedBlock::heap() const
{
return m_weakSet.heap();
@@ -326,14 +334,9 @@ namespace JSC {
return m_atomsPerCell * atomSize;
}
- inline bool MarkedBlock::cellsNeedDestruction()
- {
- return m_cellsNeedDestruction;
- }
-
- inline bool MarkedBlock::onlyContainsStructures()
+ inline MarkedBlock::DestructorType MarkedBlock::destructorType()
{
- return m_onlyContainsStructures;
+ return m_destructorType;
}
inline size_t MarkedBlock::size()
@@ -343,7 +346,7 @@ namespace JSC {
inline size_t MarkedBlock::capacity()
{
- return allocation().size();
+ return region()->blockSize();
}
inline size_t MarkedBlock::atomNumber(const void* p)
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index 9a823c50b..50634dd23 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -81,17 +81,20 @@ MarkedSpace::MarkedSpace(Heap* heap)
: m_heap(heap)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, false, false);
- destructorAllocatorFor(cellSize).init(heap, this, cellSize, true, false);
+ allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
+ normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
+ immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
}
for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, false, false);
- destructorAllocatorFor(cellSize).init(heap, this, cellSize, true, false);
+ allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
+ normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
+ immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
}
- m_largeAllocator.init(heap, this, 0, true, false);
- m_structureAllocator.init(heap, this, WTF::roundUpToMultipleOf(32, sizeof(Structure)), true, true);
+ m_normalSpace.largeAllocator.init(heap, this, 0, MarkedBlock::None);
+ m_normalDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::Normal);
+ m_immortalStructureDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::ImmortalStructure);
}
MarkedSpace::~MarkedSpace()
@@ -120,16 +123,19 @@ void MarkedSpace::resetAllocators()
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
allocatorFor(cellSize).reset();
- destructorAllocatorFor(cellSize).reset();
+ normalDestructorAllocatorFor(cellSize).reset();
+ immortalStructureDestructorAllocatorFor(cellSize).reset();
}
for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
allocatorFor(cellSize).reset();
- destructorAllocatorFor(cellSize).reset();
+ normalDestructorAllocatorFor(cellSize).reset();
+ immortalStructureDestructorAllocatorFor(cellSize).reset();
}
- m_largeAllocator.reset();
- m_structureAllocator.reset();
+ m_normalSpace.largeAllocator.reset();
+ m_normalDestructorSpace.largeAllocator.reset();
+ m_immortalStructureDestructorSpace.largeAllocator.reset();
}
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
@@ -147,34 +153,40 @@ void MarkedSpace::canonicalizeCellLivenessData()
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
allocatorFor(cellSize).canonicalizeCellLivenessData();
- destructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
}
for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
allocatorFor(cellSize).canonicalizeCellLivenessData();
- destructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ normalDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
+ immortalStructureDestructorAllocatorFor(cellSize).canonicalizeCellLivenessData();
}
- m_largeAllocator.canonicalizeCellLivenessData();
- m_structureAllocator.canonicalizeCellLivenessData();
+ m_normalSpace.largeAllocator.canonicalizeCellLivenessData();
+ m_normalDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
+ m_immortalStructureDestructorSpace.largeAllocator.canonicalizeCellLivenessData();
}
bool MarkedSpace::isPagedOut(double deadline)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline) || destructorAllocatorFor(cellSize).isPagedOut(deadline))
+ if (allocatorFor(cellSize).isPagedOut(deadline)
+ || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
+ || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
return true;
}
for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline) || destructorAllocatorFor(cellSize).isPagedOut(deadline))
+ if (allocatorFor(cellSize).isPagedOut(deadline)
+ || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
+ || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
return true;
}
- if (m_largeAllocator.isPagedOut(deadline))
- return true;
-
- if (m_structureAllocator.isPagedOut(deadline))
+ if (m_normalSpace.largeAllocator.isPagedOut(deadline)
+ || m_normalDestructorSpace.largeAllocator.isPagedOut(deadline)
+ || m_immortalStructureDestructorSpace.largeAllocator.isPagedOut(deadline))
return true;
return false;
@@ -182,14 +194,13 @@ bool MarkedSpace::isPagedOut(double deadline)
void MarkedSpace::freeBlock(MarkedBlock* block)
{
- allocatorFor(block).removeBlock(block);
+ block->allocator()->removeBlock(block);
m_blocks.remove(block);
if (block->capacity() == MarkedBlock::blockSize) {
m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
return;
}
-
- MarkedBlock::destroy(block).deallocate();
+ m_heap->blockAllocator().deallocateCustomSize(MarkedBlock::destroy(block));
}
void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index 151099b60..214536ad7 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -34,8 +34,6 @@
#include <wtf/Noncopyable.h>
#include <wtf/Vector.h>
-#define ASSERT_CLASS_FITS_IN_CELL(class) COMPILE_ASSERT(sizeof(class) <= MarkedSpace::maxCellSize, class_fits_in_cell)
-
namespace JSC {
class Heap;
@@ -68,19 +66,17 @@ struct Capacity : MarkedBlock::CountFunctor {
class MarkedSpace {
WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
- static const size_t maxCellSize = 2048;
-
MarkedSpace(Heap*);
~MarkedSpace();
void lastChanceToFinalize();
MarkedAllocator& firstAllocator();
MarkedAllocator& allocatorFor(size_t);
- MarkedAllocator& allocatorFor(MarkedBlock*);
- MarkedAllocator& destructorAllocatorFor(size_t);
- void* allocateWithDestructor(size_t);
+ MarkedAllocator& immortalStructureDestructorAllocatorFor(size_t);
+ MarkedAllocator& normalDestructorAllocatorFor(size_t);
+ void* allocateWithNormalDestructor(size_t);
+ void* allocateWithImmortalStructureDestructor(size_t);
void* allocateWithoutDestructor(size_t);
- void* allocateStructure(size_t);
void resetAllocators();
@@ -131,12 +127,12 @@ private:
struct Subspace {
FixedArray<MarkedAllocator, preciseCount> preciseAllocators;
FixedArray<MarkedAllocator, impreciseCount> impreciseAllocators;
+ MarkedAllocator largeAllocator;
};
- Subspace m_destructorSpace;
+ Subspace m_normalDestructorSpace;
+ Subspace m_immortalStructureDestructorSpace;
Subspace m_normalSpace;
- MarkedAllocator m_largeAllocator;
- MarkedAllocator m_structureAllocator;
Heap* m_heap;
MarkedBlockSet m_blocks;
@@ -186,28 +182,27 @@ inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
if (bytes <= impreciseCutoff)
return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_largeAllocator;
+ return m_normalSpace.largeAllocator;
}
-inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block)
+inline MarkedAllocator& MarkedSpace::immortalStructureDestructorAllocatorFor(size_t bytes)
{
- if (block->onlyContainsStructures())
- return m_structureAllocator;
-
- if (block->cellsNeedDestruction())
- return destructorAllocatorFor(block->cellSize());
-
- return allocatorFor(block->cellSize());
+ ASSERT(bytes);
+ if (bytes <= preciseCutoff)
+ return m_immortalStructureDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ if (bytes <= impreciseCutoff)
+ return m_immortalStructureDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_immortalStructureDestructorSpace.largeAllocator;
}
-inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes)
+inline MarkedAllocator& MarkedSpace::normalDestructorAllocatorFor(size_t bytes)
{
ASSERT(bytes);
if (bytes <= preciseCutoff)
- return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ return m_normalDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
if (bytes <= impreciseCutoff)
- return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_largeAllocator;
+ return m_normalDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_normalDestructorSpace.largeAllocator;
}
inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
@@ -215,30 +210,33 @@ inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
return allocatorFor(bytes).allocate(bytes);
}
-inline void* MarkedSpace::allocateWithDestructor(size_t bytes)
+inline void* MarkedSpace::allocateWithImmortalStructureDestructor(size_t bytes)
{
- return destructorAllocatorFor(bytes).allocate(bytes);
+ return immortalStructureDestructorAllocatorFor(bytes).allocate(bytes);
}
-inline void* MarkedSpace::allocateStructure(size_t bytes)
+inline void* MarkedSpace::allocateWithNormalDestructor(size_t bytes)
{
- return m_structureAllocator.allocate(bytes);
+ return normalDestructorAllocatorFor(bytes).allocate(bytes);
}
template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
{
for (size_t i = 0; i < preciseCount; ++i) {
m_normalSpace.preciseAllocators[i].forEachBlock(functor);
- m_destructorSpace.preciseAllocators[i].forEachBlock(functor);
+ m_normalDestructorSpace.preciseAllocators[i].forEachBlock(functor);
+ m_immortalStructureDestructorSpace.preciseAllocators[i].forEachBlock(functor);
}
for (size_t i = 0; i < impreciseCount; ++i) {
m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
- m_destructorSpace.impreciseAllocators[i].forEachBlock(functor);
+ m_normalDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
+ m_immortalStructureDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
}
- m_largeAllocator.forEachBlock(functor);
- m_structureAllocator.forEachBlock(functor);
+ m_normalSpace.largeAllocator.forEachBlock(functor);
+ m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
+ m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
return functor.returnValue();
}
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 0f003e79d..26d056feb 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -4,7 +4,9 @@
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
+#include "GCThread.h"
#include "JSArray.h"
+#include "JSDestructibleObject.h"
#include "JSGlobalData.h"
#include "JSObject.h"
#include "JSString.h"
@@ -34,8 +36,8 @@ void SlotVisitor::setup()
m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst();
m_shouldHashConst = m_shared.m_shouldHashConst;
#if ENABLE(PARALLEL_GC)
- for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i)
- m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst;
+ for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
+ m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashConst = m_shared.m_shouldHashConst;
#endif
}
@@ -180,7 +182,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
while (true) {
// Did we reach termination?
if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
- // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
+ // Let any sleeping slaves know it's time for them to return.
m_shared.m_markingCondition.broadcast();
return;
}
@@ -199,17 +201,12 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
m_shared.m_markingCondition.broadcast();
- while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- doneCopying();
+ while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
m_shared.m_markingCondition.wait(m_shared.m_markingLock);
- }
- // Is the VM exiting? If so, exit this thread.
- if (m_shared.m_parallelMarkersShouldExit) {
- doneCopying();
+ // Is the current phase done? If so, return from this function.
+ if (m_shared.m_parallelMarkersShouldExit)
return;
- }
}
size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
@@ -235,30 +232,6 @@ void SlotVisitor::mergeOpaqueRoots()
m_opaqueRoots.clear();
}
-void SlotVisitor::startCopying()
-{
- ASSERT(!m_copiedAllocator.isValid());
-}
-
-void* SlotVisitor::allocateNewSpaceSlow(size_t bytes)
-{
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
- m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());
-
- void* result = 0;
- CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
- ASSERT(didSucceed);
- return result;
-}
-
-void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes)
-{
- if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes))
- return 0;
-
- return allocateNewSpace(bytes);
-}
-
ALWAYS_INLINE bool JSString::tryHashConstLock()
{
#if ENABLE(PARALLEL_GC)
@@ -320,7 +293,7 @@ ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
if (addResult.isNewEntry)
string->setHashConstSingleton();
else {
- JSValue existingJSValue = addResult.iterator->second;
+ JSValue existingJSValue = addResult.iterator->value;
if (value != existingJSValue)
jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton();
*slot = existingJSValue;
@@ -334,36 +307,6 @@ ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
internalAppend(cell);
}
-void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
-{
- void* oldPtr = *ptr;
- void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes);
- if (newPtr) {
- size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));
-
- JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
- for (unsigned i = 0; i < length; i++) {
- JSValue& value = values[i];
- newValues[i] = value;
- if (!value)
- continue;
- internalAppend(&newValues[i]);
- }
-
- memcpy(newPtr, oldPtr, jsValuesOffset);
- *ptr = newPtr;
- } else
- append(values, length);
-}
-
-void SlotVisitor::doneCopying()
-{
- if (!m_copiedAllocator.isValid())
- return;
-
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
-}
-
void SlotVisitor::harvestWeakReferences()
{
for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 230ed3334..dcd4b75ef 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -26,7 +26,6 @@
#ifndef SlotVisitor_h
#define SlotVisitor_h
-#include "CopiedSpace.h"
#include "HandleTypes.h"
#include "MarkStackInlineMethods.h"
@@ -80,21 +79,8 @@ public:
void harvestWeakReferences();
void finalizeUnconditionalFinalizers();
- void startCopying();
+ void copyLater(void*, size_t);
- // High-level API for copying, appropriate for cases where the object's heap references
- // fall into a contiguous region of the storage chunk and if the object for which you're
- // doing copying does not occur frequently.
- void copyAndAppend(void**, size_t, JSValue*, unsigned);
-
- // Low-level API for copying, appropriate for cases where the object's heap references
- // are discontiguous or if the object occurs frequently enough that you need to focus on
- // performance. Use this with care as it is easy to shoot yourself in the foot.
- bool checkIfShouldCopyAndPinOtherwise(void* oldPtr, size_t);
- void* allocateNewSpace(size_t);
-
- void doneCopying();
-
#if ENABLE(SIMPLE_HEAP_PROFILING)
VTableSpectrum m_visitedTypeCounts;
#endif
@@ -125,9 +111,6 @@ private:
void mergeOpaqueRootsIfNecessary();
void mergeOpaqueRootsIfProfitable();
- void* allocateNewSpaceOrPin(void*, size_t);
- void* allocateNewSpaceSlow(size_t);
-
void donateKnownParallel();
MarkStackArray m_stack;
@@ -146,8 +129,6 @@ private:
unsigned m_logChildCount;
#endif
- CopiedAllocator m_copiedAllocator;
-
public:
#if !ASSERT_DISABLED
bool m_isCheckingForDefaultMarkViolation;
diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
index 540da3bc4..e5908bf36 100644
--- a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
+++ b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
@@ -136,30 +136,6 @@ inline void SlotVisitor::mergeOpaqueRootsIfProfitable()
mergeOpaqueRoots();
}
-ALWAYS_INLINE bool SlotVisitor::checkIfShouldCopyAndPinOtherwise(void* oldPtr, size_t bytes)
-{
- if (CopiedSpace::isOversize(bytes)) {
- m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(oldPtr));
- return false;
- }
-
- if (m_shared.m_copiedSpace->isPinned(oldPtr))
- return false;
-
- return true;
-}
-
-ALWAYS_INLINE void* SlotVisitor::allocateNewSpace(size_t bytes)
-{
- void* result = 0; // Compilers don't realize that this will be assigned.
- if (LIKELY(m_copiedAllocator.tryAllocate(bytes, &result)))
- return result;
-
- result = allocateNewSpaceSlow(bytes);
- ASSERT(result);
- return result;
-}
-
inline void SlotVisitor::donate()
{
ASSERT(m_isInParallelMode);
@@ -175,6 +151,23 @@ inline void SlotVisitor::donateAndDrain()
drain();
}
+inline void SlotVisitor::copyLater(void* ptr, size_t bytes)
+{
+ if (CopiedSpace::isOversize(bytes)) {
+ m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
+ return;
+ }
+
+ CopiedBlock* block = CopiedSpace::blockFor(ptr);
+ if (block->isPinned())
+ return;
+
+ block->reportLiveBytes(bytes);
+
+ if (!block->shouldEvacuate())
+ m_shared.m_copiedSpace->pin(block);
+}
+
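copyLater() replaces the eager copy-during-marking path: the marking phase now only records liveness and pins blocks that should stay put, deferring the actual evacuation to CopyVisitor in the copy phase. A standalone model of the policy (the 50% evacuation threshold is an invented stand-in for whatever CopiedBlock::shouldEvacuate() actually uses):

    #include <cstddef>

    struct BlockModel {
        size_t capacity;
        size_t liveBytes;
        bool pinned;

        bool shouldEvacuate() const { return liveBytes < capacity / 2; }
    };

    static void copyLater(BlockModel& block, size_t bytes, size_t oversizeCutoff)
    {
        if (bytes > oversizeCutoff) { // CopiedSpace::isOversize(bytes)
            block.pinned = true;      // oversize storage never moves
            return;
        }
        if (block.pinned)
            return;
        block.liveBytes += bytes;     // CopiedBlock::reportLiveBytes(bytes)
        if (!block.shouldEvacuate())  // mostly-live blocks are not worth moving
            block.pinned = true;
    }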
} // namespace JSC
#endif // SlotVisitorInlineMethods_h
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
index 07698fd06..3c3d1d0ce 100644
--- a/Source/JavaScriptCore/heap/Weak.h
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -164,8 +164,8 @@ template<typename Map, typename Key, typename Value> inline void weakRemove(Map&
typename Map::iterator it = map.find(key);
ASSERT_UNUSED(value, value);
ASSERT(it != map.end());
- ASSERT(it->second.was(value));
- ASSERT(!it->second);
+ ASSERT(it->value.was(value));
+ ASSERT(!it->value);
map.remove(it);
}