author    | Simon Hausmann <simon.hausmann@nokia.com> | 2012-02-24 16:36:50 +0100
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-02-24 16:36:50 +0100
commit    | ad0d549d4cc13433f77c1ac8f0ab379c83d93f28
tree      | b34b0daceb7c8e7fdde4b4ec43650ab7caadb0a9 /Source/JavaScriptCore/heap
parent    | 03e12282df9aa1e1fb05a8b90f1cfc2e08764cec
download  | qtwebkit-ad0d549d4cc13433f77c1ac8f0ab379c83d93f28.tar.gz
Imported WebKit commit bb52bf3c0119e8a128cd93afe5572413a8617de9 (http://svn.webkit.org/repository/webkit/trunk@108790)
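The bulk of this import (WebKit trunk r108790) renames the nursery copying allocator from BumpSpace to CopiedSpace, splits the bump-pointer bookkeeping out into a new CopiedAllocator class, and splits the MarkedSpace allocators by whether cells need destructors. What follows is a minimal standalone sketch of the bump-pointer pattern that the new CopiedAllocator encapsulates; the type and helper names here are illustrative assumptions patterned on the hunks below, not the actual WebKit API.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

namespace sketch {

const size_t blockSize = 64 * 1024; // blocks are 64 KB, allocated on 64 KB boundaries

// Mirrors the HeapBlock/CopiedBlock layout trick: the struct header sits at
// the start of the block and allocations are carved out of the trailing payload.
struct Block {
    char* offset;    // next free byte within the block
    char payload[1]; // first byte of allocatable storage
};

// Bump allocation: hand out the current offset and advance it. The caller is
// expected to have already checked that the request fits in the block.
inline void* allocateFromBlock(Block* block, size_t bytes)
{
    void* result = block->offset;
    block->offset += bytes;
    return result;
}

// Recover the owning block from an interior pointer by masking off the low
// bits -- valid only because blocks are aligned to blockSize boundaries.
inline Block* blockFor(void* ptr)
{
    return reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(ptr) & ~(blockSize - 1));
}

} // namespace sketch

int main()
{
    // Demo: carve two allocations out of one 64 KB-aligned block.
    void* memory = std::aligned_alloc(sketch::blockSize, sketch::blockSize);
    sketch::Block* block = static_cast<sketch::Block*>(memory);
    block->offset = block->payload;
    void* a = sketch::allocateFromBlock(block, 64);
    void* b = sketch::allocateFromBlock(block, 128);
    std::printf("%d %d\n", sketch::blockFor(a) == block, sketch::blockFor(b) == block); // 1 1
    std::free(memory);
    return 0;
}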
Diffstat (limited to 'Source/JavaScriptCore/heap')
23 files changed, 895 insertions, 487 deletions
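The ConservativeRoots hunk that opens the diff below probes every plausible stack word against the copied space: a tiny one-word Bloom filter rules out most candidates cheaply before the exact hash-set lookup, and any genuine hit pins its block so the copying pass leaves it in place. Here is a hedged sketch of that filter-then-set containment test; the one-word filter is an illustrative reimplementation of the idea, not WTF's TinyBloomFilter itself.

#include <cstdint>
#include <cstdio>
#include <unordered_set>

// One-word Bloom filter: may report false positives, never false negatives.
class OneWordFilter {
public:
    OneWordFilter() : m_bits(0) { }
    void add(uintptr_t bits) { m_bits |= bits; }
    // True means "definitely never added"; false means "possibly present".
    bool ruleOut(uintptr_t bits) const
    {
        if (!bits)
            return true;
        return (bits & m_bits) != bits;
    }

private:
    uintptr_t m_bits;
};

struct Block { bool isPinned; };

struct ToSpace {
    OneWordFilter filter;
    std::unordered_set<Block*> blocks;

    // Called for every candidate pointer found during conservative scanning.
    void pinIfContained(Block* candidate)
    {
        // Fast path: the filter excludes almost all non-heap words.
        if (filter.ruleOut(reinterpret_cast<uintptr_t>(candidate)))
            return;
        // Slow path: exact membership test. A filter false positive is
        // rejected here, so pinning only happens for real to-space blocks.
        if (blocks.count(candidate))
            candidate->isPinned = true;
    }
};

int main()
{
    ToSpace space;
    Block block = { false };
    space.filter.add(reinterpret_cast<uintptr_t>(&block));
    space.blocks.insert(&block);
    space.pinIfContained(&block);   // genuine hit: pins the block
    Block outside = { false };
    space.pinIfContained(&outside); // ruled out by filter or rejected by the set
    std::printf("%d %d\n", block.isPinned, outside.isPinned); // 1 0
    return 0;
}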
diff --git a/Source/JavaScriptCore/heap/BumpSpace.cpp b/Source/JavaScriptCore/heap/BumpSpace.cpp deleted file mode 100644 index 4eb0284dd..000000000 --- a/Source/JavaScriptCore/heap/BumpSpace.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) 2011 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "BumpSpace.h" - -#include "BumpSpaceInlineMethods.h" - -namespace JSC { - -CheckedBoolean BumpSpace::tryAllocateSlowCase(size_t bytes, void** outPtr) -{ - if (isOversize(bytes)) - return tryAllocateOversize(bytes, outPtr); - - m_totalMemoryUtilized += static_cast<size_t>(static_cast<char*>(m_currentBlock->m_offset) - m_currentBlock->m_payload); - if (!addNewBlock()) { - *outPtr = 0; - return false; - } - m_toSpaceFilter.add(reinterpret_cast<Bits>(m_currentBlock)); - m_toSpaceSet.add(m_currentBlock); - *outPtr = allocateFromBlock(m_currentBlock, bytes); - return true; -} - -} // namespace JSC diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp index a509f06e1..d63faebf3 100644 --- a/Source/JavaScriptCore/heap/ConservativeRoots.cpp +++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp @@ -26,8 +26,8 @@ #include "config.h" #include "ConservativeRoots.h" -#include "BumpSpace.h" -#include "BumpSpaceInlineMethods.h" +#include "CopiedSpace.h" +#include "CopiedSpaceInlineMethods.h" #include "CodeBlock.h" #include "DFGCodeBlocks.h" #include "JSCell.h" @@ -36,12 +36,12 @@ namespace JSC { -ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, BumpSpace* bumpSpace) +ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, CopiedSpace* copiedSpace) : m_roots(m_inlineRoots) , m_size(0) , m_capacity(inlineCapacity) , m_blocks(blocks) - , m_bumpSpace(bumpSpace) + , m_copiedSpace(copiedSpace) { } @@ -72,9 +72,9 @@ inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter { markHook.mark(p); - BumpBlock* block; - if (m_bumpSpace->contains(p, block)) - m_bumpSpace->pin(block); + CopiedBlock* block; + if (m_copiedSpace->contains(p, block)) + m_copiedSpace->pin(block); MarkedBlock* candidate = MarkedBlock::blockFor(p); if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) { diff --git 
a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h index 40b0996d0..9d9e9ba0c 100644 --- a/Source/JavaScriptCore/heap/ConservativeRoots.h +++ b/Source/JavaScriptCore/heap/ConservativeRoots.h @@ -38,7 +38,7 @@ class Heap; class ConservativeRoots { public: - ConservativeRoots(const MarkedBlockSet*, BumpSpace*); + ConservativeRoots(const MarkedBlockSet*, CopiedSpace*); ~ConservativeRoots(); void add(void* begin, void* end); @@ -63,7 +63,7 @@ private: size_t m_size; size_t m_capacity; const MarkedBlockSet* m_blocks; - BumpSpace* m_bumpSpace; + CopiedSpace* m_copiedSpace; JSCell* m_inlineRoots[inlineCapacity]; }; diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h new file mode 100644 index 000000000..c5ba50d78 --- /dev/null +++ b/Source/JavaScriptCore/heap/CopiedAllocator.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CopiedAllocator_h +#define CopiedAllocator_h + +#include "CopiedBlock.h" + +namespace JSC { + +class CopiedAllocator { +public: + CopiedAllocator(); + void* allocate(size_t); + bool fitsInCurrentBlock(size_t); + bool wasLastAllocation(void*, size_t); + void startedCopying(); + void resetCurrentBlock(CopiedBlock*); + void resetLastAllocation(void*); + size_t currentUtilization(); + +private: + CopiedBlock* currentBlock() { return m_currentBlock; } + + char* m_currentOffset; + CopiedBlock* m_currentBlock; +}; + +inline CopiedAllocator::CopiedAllocator() + : m_currentOffset(0) + , m_currentBlock(0) +{ +} + +inline void* CopiedAllocator::allocate(size_t bytes) +{ + ASSERT(m_currentOffset); + ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes))); + ASSERT(fitsInCurrentBlock(bytes)); + void* ptr = static_cast<void*>(m_currentOffset); + m_currentOffset += bytes; + ASSERT(is8ByteAligned(ptr)); + return ptr; +} + +inline bool CopiedAllocator::fitsInCurrentBlock(size_t bytes) +{ + return m_currentOffset + bytes < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize && m_currentOffset + bytes > m_currentOffset; +} + +inline bool CopiedAllocator::wasLastAllocation(void* ptr, size_t size) +{ + return static_cast<char*>(ptr) + size == m_currentOffset && ptr > m_currentBlock && ptr < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize; +} + +inline void CopiedAllocator::startedCopying() +{ + if (m_currentBlock) + m_currentBlock->m_offset = static_cast<void*>(m_currentOffset); + m_currentOffset = 0; + m_currentBlock = 0; +} + +inline void CopiedAllocator::resetCurrentBlock(CopiedBlock* newBlock) +{ + if (m_currentBlock) + m_currentBlock->m_offset = static_cast<void*>(m_currentOffset); + m_currentBlock = newBlock; + m_currentOffset = static_cast<char*>(newBlock->m_offset); +} + +inline size_t CopiedAllocator::currentUtilization() +{ + return static_cast<size_t>(m_currentOffset - m_currentBlock->m_payload); +} + +inline void CopiedAllocator::resetLastAllocation(void* ptr) +{ + m_currentOffset = static_cast<char*>(ptr); +} + +} // namespace JSC + +#endif diff --git a/Source/JavaScriptCore/heap/BumpBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h index b9f271ca8..a57c1150c 100644 --- a/Source/JavaScriptCore/heap/BumpBlock.h +++ b/Source/JavaScriptCore/heap/CopiedBlock.h @@ -23,28 +23,42 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef BumpBlock_h -#define BumpBlock_h +#ifndef CopiedBlock_h +#define CopiedBlock_h #include "HeapBlock.h" +#include "JSValue.h" +#include "JSValueInlineMethods.h" namespace JSC { -class BumpSpace; +class CopiedSpace; -class BumpBlock : public HeapBlock { - friend class BumpSpace; +class CopiedBlock : public HeapBlock { + friend class CopiedSpace; + friend class CopiedAllocator; public: - BumpBlock(PageAllocationAligned& allocation) + CopiedBlock(PageAllocationAligned& allocation) : HeapBlock(allocation) , m_offset(m_payload) , m_isPinned(false) { + ASSERT(is8ByteAligned(static_cast<void*>(m_payload))); +#if USE(JSVALUE64) + memset(static_cast<void*>(m_payload), 0, static_cast<size_t>((reinterpret_cast<char*>(this) + allocation.size()) - m_payload)); +#else + JSValue emptyValue; + JSValue* limit = reinterpret_cast<JSValue*>(reinterpret_cast<char*>(this) + allocation.size()); + for (JSValue* currentValue = reinterpret_cast<JSValue*>(m_payload); currentValue < limit; currentValue++) + *currentValue = emptyValue; +#endif } private: void* m_offset; uintptr_t m_isPinned; + uintptr_t m_padding; + uintptr_t m_dummy; char m_payload[1]; }; diff --git a/Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpace.cpp index 3454631b0..3310d2c58 100644 --- a/Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h +++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp @@ -23,21 +23,15 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef BumpSpaceInlineMethods_h -#define BumpSpaceInlineMethods_h +#include "config.h" +#include "CopiedSpace.h" -#include "BumpBlock.h" -#include "BumpSpace.h" -#include "Heap.h" -#include "HeapBlock.h" -#include "JSGlobalData.h" -#include <wtf/CheckedBoolean.h> +#include "CopiedSpaceInlineMethods.h" namespace JSC { -inline BumpSpace::BumpSpace(Heap* heap) +CopiedSpace::CopiedSpace(Heap* heap) : m_heap(heap) - , m_currentBlock(0) , m_toSpace(0) , m_fromSpace(0) , m_totalMemoryAllocated(0) @@ -47,92 +41,116 @@ inline BumpSpace::BumpSpace(Heap* heap) { } -inline void BumpSpace::init() +void CopiedSpace::init() { m_toSpace = &m_blocks1; m_fromSpace = &m_blocks2; - m_totalMemoryAllocated += s_blockSize * s_initialBlockNum; + m_totalMemoryAllocated += HeapBlock::s_blockSize * s_initialBlockNum; if (!addNewBlock()) CRASH(); } -inline bool BumpSpace::contains(void* ptr, BumpBlock*& result) +CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr) { - BumpBlock* block = blockFor(ptr); - result = block; - return !m_toSpaceFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_toSpaceSet.contains(block); + if (isOversize(bytes)) + return tryAllocateOversize(bytes, outPtr); + + m_totalMemoryUtilized += m_allocator.currentUtilization(); + if (!addNewBlock()) { + *outPtr = 0; + return false; + } + *outPtr = m_allocator.allocate(bytes); + ASSERT(*outPtr); + return true; } -inline void BumpSpace::pin(BumpBlock* block) +CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr) { - block->m_isPinned = true; + ASSERT(isOversize(bytes)); + + size_t blockSize = WTF::roundUpToMultipleOf<s_pageSize>(sizeof(CopiedBlock) + bytes); + PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, s_pageSize, OSAllocator::JSGCHeapPages); + if (!static_cast<bool>(allocation)) { + *outPtr = 0; + return false; + } + CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation); + m_oversizeBlocks.push(block); + ASSERT(is8ByteAligned(block->m_offset)); + + 
m_oversizeFilter.add(reinterpret_cast<Bits>(block)); + + m_totalMemoryAllocated += blockSize; + m_totalMemoryUtilized += bytes; + + *outPtr = block->m_offset; + return true; } -inline void BumpSpace::startedCopying() +CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize) { - DoublyLinkedList<HeapBlock>* temp = m_fromSpace; - m_fromSpace = m_toSpace; - m_toSpace = temp; + if (oldSize >= newSize) + return true; + + void* oldPtr = *ptr; + ASSERT(!m_heap->globalData()->isInitializingObject()); - m_toSpaceFilter.reset(); + if (isOversize(oldSize) || isOversize(newSize)) + return tryReallocateOversize(ptr, oldSize, newSize); - m_totalMemoryUtilized = 0; + if (m_allocator.wasLastAllocation(oldPtr, oldSize)) { + m_allocator.resetLastAllocation(oldPtr); + if (m_allocator.fitsInCurrentBlock(newSize)) { + m_totalMemoryUtilized += newSize - oldSize; + return m_allocator.allocate(newSize); + } + } + m_totalMemoryUtilized -= oldSize; - ASSERT(!m_inCopyingPhase); - ASSERT(!m_numberOfLoanedBlocks); - m_inCopyingPhase = true; + void* result = 0; + if (!tryAllocate(newSize, &result)) { + *ptr = 0; + return false; + } + memcpy(result, oldPtr, oldSize); + *ptr = result; + return true; } -inline void BumpSpace::doneCopying() +CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize) { - { - MutexLocker locker(m_loanedBlocksLock); - while (m_numberOfLoanedBlocks > 0) - m_loanedBlocksCondition.wait(m_loanedBlocksLock); - } - - ASSERT(m_inCopyingPhase); - m_inCopyingPhase = false; - while (!m_fromSpace->isEmpty()) { - BumpBlock* block = static_cast<BumpBlock*>(m_fromSpace->removeHead()); - if (block->m_isPinned) { - block->m_isPinned = false; - m_toSpace->push(block); - continue; - } + ASSERT(isOversize(oldSize) || isOversize(newSize)); + ASSERT(newSize > oldSize); - m_toSpaceSet.remove(block); - { - MutexLocker locker(m_heap->m_freeBlockLock); - m_heap->m_freeBlocks.push(block); - m_heap->m_numberOfFreeBlocks++; - } + void* oldPtr = *ptr; + + void* newPtr = 0; + if (!tryAllocateOversize(newSize, &newPtr)) { + *ptr = 0; + return false; } + memcpy(newPtr, oldPtr, oldSize); - BumpBlock* curr = static_cast<BumpBlock*>(m_oversizeBlocks.head()); - while (curr) { - BumpBlock* next = static_cast<BumpBlock*>(curr->next()); - if (!curr->m_isPinned) { - m_oversizeBlocks.remove(curr); - m_totalMemoryAllocated -= curr->m_allocation.size(); - m_totalMemoryUtilized -= curr->m_allocation.size() - sizeof(BumpBlock); - curr->m_allocation.deallocate(); - } else - curr->m_isPinned = false; - curr = next; + if (isOversize(oldSize)) { + CopiedBlock* oldBlock = oversizeBlockFor(oldPtr); + m_oversizeBlocks.remove(oldBlock); + oldBlock->m_allocation.deallocate(); + m_totalMemoryAllocated -= oldSize + sizeof(CopiedBlock); } + + m_totalMemoryUtilized -= oldSize; - if (!(m_currentBlock = static_cast<BumpBlock*>(m_toSpace->head()))) - if (!addNewBlock()) - CRASH(); + *ptr = newPtr; + return true; } -inline void BumpSpace::doneFillingBlock(BumpBlock* block) +void CopiedSpace::doneFillingBlock(CopiedBlock* block) { ASSERT(block); - ASSERT(block->m_offset < reinterpret_cast<char*>(block) + s_blockSize); + ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize); ASSERT(m_inCopyingPhase); if (block->m_offset == block->m_payload) { @@ -161,27 +179,56 @@ inline void BumpSpace::doneFillingBlock(BumpBlock* block) } } -inline void BumpSpace::recycleBlock(BumpBlock* block) +void CopiedSpace::doneCopying() { { - MutexLocker locker(m_heap->m_freeBlockLock); - 
m_heap->m_freeBlocks.push(block); - m_heap->m_numberOfFreeBlocks++; + MutexLocker locker(m_loanedBlocksLock); + while (m_numberOfLoanedBlocks > 0) + m_loanedBlocksCondition.wait(m_loanedBlocksLock); } - { - MutexLocker locker(m_loanedBlocksLock); - ASSERT(m_numberOfLoanedBlocks > 0); - m_numberOfLoanedBlocks--; - if (!m_numberOfLoanedBlocks) - m_loanedBlocksCondition.signal(); + ASSERT(m_inCopyingPhase); + m_inCopyingPhase = false; + while (!m_fromSpace->isEmpty()) { + CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead()); + if (block->m_isPinned) { + block->m_isPinned = false; + m_toSpace->push(block); + continue; + } + + m_toSpaceSet.remove(block); + { + MutexLocker locker(m_heap->m_freeBlockLock); + m_heap->m_freeBlocks.push(block); + m_heap->m_numberOfFreeBlocks++; + } } + + CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); + while (curr) { + CopiedBlock* next = static_cast<CopiedBlock*>(curr->next()); + if (!curr->m_isPinned) { + m_oversizeBlocks.remove(curr); + m_totalMemoryAllocated -= curr->m_allocation.size(); + m_totalMemoryUtilized -= curr->m_allocation.size() - sizeof(CopiedBlock); + curr->m_allocation.deallocate(); + } else + curr->m_isPinned = false; + curr = next; + } + + if (!m_toSpace->head()) { + if (!addNewBlock()) + CRASH(); + } else + m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head())); } -inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort, BumpBlock** outBlock) +CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock) { HeapBlock* heapBlock = 0; - BumpBlock* block = 0; + CopiedBlock* block = 0; { MutexLocker locker(m_heap->m_freeBlockLock); if (!m_heap->m_freeBlocks.isEmpty()) { @@ -190,7 +237,7 @@ inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort } } if (heapBlock) - block = new (NotNull, heapBlock) BumpBlock(heapBlock->m_allocation); + block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation); else if (allocationEffort == AllocationMustSucceed) { if (!allocateNewBlock(&block)) { *outBlock = 0; @@ -209,192 +256,9 @@ inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort } } ASSERT(block); - ASSERT(isPointerAligned(block->m_offset)); + ASSERT(is8ByteAligned(block->m_offset)); *outBlock = block; return true; } -inline CheckedBoolean BumpSpace::borrowBlock(BumpBlock** outBlock) -{ - BumpBlock* block = 0; - if (!getFreshBlock(AllocationMustSucceed, &block)) { - *outBlock = 0; - return false; - } - - ASSERT(m_inCopyingPhase); - MutexLocker locker(m_loanedBlocksLock); - m_numberOfLoanedBlocks++; - - ASSERT(block->m_offset == block->m_payload); - *outBlock = block; - return true; -} - -inline CheckedBoolean BumpSpace::addNewBlock() -{ - BumpBlock* block = 0; - if (!getFreshBlock(AllocationCanFail, &block)) - return false; - - m_toSpace->push(block); - m_currentBlock = block; - return true; -} - -inline CheckedBoolean BumpSpace::allocateNewBlock(BumpBlock** outBlock) -{ - PageAllocationAligned allocation = PageAllocationAligned::allocate(s_blockSize, s_blockSize, OSAllocator::JSGCHeapPages); - if (!static_cast<bool>(allocation)) { - *outBlock = 0; - return false; - } - - { - MutexLocker locker(m_memoryStatsLock); - m_totalMemoryAllocated += s_blockSize; - } - - *outBlock = new (NotNull, allocation.base()) BumpBlock(allocation); - return true; -} - -inline bool BumpSpace::fitsInBlock(BumpBlock* block, size_t bytes) -{ - return static_cast<char*>(block->m_offset) + bytes 
< reinterpret_cast<char*>(block) + s_blockSize && static_cast<char*>(block->m_offset) + bytes > block->m_offset; -} - -inline bool BumpSpace::fitsInCurrentBlock(size_t bytes) -{ - return fitsInBlock(m_currentBlock, bytes); -} - -inline CheckedBoolean BumpSpace::tryAllocate(size_t bytes, void** outPtr) -{ - ASSERT(!m_heap->globalData()->isInitializingObject()); - - if (isOversize(bytes) || !fitsInCurrentBlock(bytes)) - return tryAllocateSlowCase(bytes, outPtr); - - *outPtr = allocateFromBlock(m_currentBlock, bytes); - return true; -} - -inline CheckedBoolean BumpSpace::tryAllocateOversize(size_t bytes, void** outPtr) -{ - ASSERT(isOversize(bytes)); - - size_t blockSize = WTF::roundUpToMultipleOf<s_pageSize>(sizeof(BumpBlock) + bytes); - PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, s_pageSize, OSAllocator::JSGCHeapPages); - if (!static_cast<bool>(allocation)) { - *outPtr = 0; - return false; - } - BumpBlock* block = new (NotNull, allocation.base()) BumpBlock(allocation); - m_oversizeBlocks.push(block); - ASSERT(isPointerAligned(block->m_offset)); - - m_oversizeFilter.add(reinterpret_cast<Bits>(block)); - - m_totalMemoryAllocated += blockSize; - m_totalMemoryUtilized += bytes; - - *outPtr = block->m_offset; - return true; -} - -inline void* BumpSpace::allocateFromBlock(BumpBlock* block, size_t bytes) -{ - ASSERT(!isOversize(bytes)); - ASSERT(fitsInBlock(block, bytes)); - ASSERT(isPointerAligned(block->m_offset)); - - void* ptr = block->m_offset; - ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + s_blockSize); - block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes)); - ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + s_blockSize); - - ASSERT(isPointerAligned(ptr)); - return ptr; -} - -inline CheckedBoolean BumpSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize) -{ - if (oldSize >= newSize) - return true; - - void* oldPtr = *ptr; - ASSERT(!m_heap->globalData()->isInitializingObject()); - - if (isOversize(oldSize) || isOversize(newSize)) - return tryReallocateOversize(ptr, oldSize, newSize); - - if (static_cast<char*>(oldPtr) + oldSize == m_currentBlock->m_offset && oldPtr > m_currentBlock && oldPtr < reinterpret_cast<char*>(m_currentBlock) + s_blockSize) { - m_currentBlock->m_offset = oldPtr; - if (fitsInCurrentBlock(newSize)) { - m_totalMemoryUtilized += newSize - oldSize; - return allocateFromBlock(m_currentBlock, newSize); - } - } - m_totalMemoryUtilized -= oldSize; - - void* result = 0; - if (!tryAllocate(newSize, &result)) { - *ptr = 0; - return false; - } - memcpy(result, oldPtr, oldSize); - *ptr = result; - return true; -} - -inline CheckedBoolean BumpSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize) -{ - ASSERT(isOversize(oldSize) || isOversize(newSize)); - ASSERT(newSize > oldSize); - - void* oldPtr = *ptr; - - void* newPtr = 0; - if (!tryAllocateOversize(newSize, &newPtr)) { - *ptr = 0; - return false; - } - memcpy(newPtr, oldPtr, oldSize); - - if (isOversize(oldSize)) { - BumpBlock* oldBlock = oversizeBlockFor(oldPtr); - m_oversizeBlocks.remove(oldBlock); - oldBlock->m_allocation.deallocate(); - m_totalMemoryAllocated -= oldSize + sizeof(BumpBlock); - } - - m_totalMemoryUtilized -= oldSize; - - *ptr = newPtr; - return true; -} - -inline bool BumpSpace::isOversize(size_t bytes) -{ - return bytes > s_maxAllocationSize; -} - -inline bool BumpSpace::isPinned(void* ptr) -{ - return blockFor(ptr)->m_isPinned; 
-} - -inline BumpBlock* BumpSpace::oversizeBlockFor(void* ptr) -{ - return reinterpret_cast<BumpBlock*>(reinterpret_cast<size_t>(ptr) & s_pageMask); -} - -inline BumpBlock* BumpSpace::blockFor(void* ptr) -{ - return reinterpret_cast<BumpBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask); -} - } // namespace JSC - -#endif diff --git a/Source/JavaScriptCore/heap/BumpSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h index 30e6b74fe..285e2b9a2 100644 --- a/Source/JavaScriptCore/heap/BumpSpace.h +++ b/Source/JavaScriptCore/heap/CopiedSpace.h @@ -23,9 +23,10 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef BumpSpace_h -#define BumpSpace_h +#ifndef CopiedSpace_h +#define CopiedSpace_h +#include "CopiedAllocator.h" #include "HeapBlock.h" #include "TinyBloomFilter.h" #include <wtf/Assertions.h> @@ -40,13 +41,13 @@ namespace JSC { class Heap; -class BumpBlock; +class CopiedBlock; class HeapBlock; -class BumpSpace { +class CopiedSpace { friend class SlotVisitor; public: - BumpSpace(Heap*); + CopiedSpace(Heap*); void init(); CheckedBoolean tryAllocate(size_t, void**); @@ -56,42 +57,41 @@ public: void doneCopying(); bool isInCopyPhase() { return m_inCopyingPhase; } - void pin(BumpBlock*); + void pin(CopiedBlock*); bool isPinned(void*); - bool contains(void*, BumpBlock*&); + bool contains(void*, CopiedBlock*&); size_t totalMemoryAllocated() { return m_totalMemoryAllocated; } size_t totalMemoryUtilized() { return m_totalMemoryUtilized; } - static BumpBlock* blockFor(void*); + static CopiedBlock* blockFor(void*); private: CheckedBoolean tryAllocateSlowCase(size_t, void**); CheckedBoolean addNewBlock(); - CheckedBoolean allocateNewBlock(BumpBlock**); - bool fitsInCurrentBlock(size_t); + CheckedBoolean allocateNewBlock(CopiedBlock**); - static void* allocateFromBlock(BumpBlock*, size_t); + static void* allocateFromBlock(CopiedBlock*, size_t); CheckedBoolean tryAllocateOversize(size_t, void**); CheckedBoolean tryReallocateOversize(void**, size_t, size_t); static bool isOversize(size_t); - CheckedBoolean borrowBlock(BumpBlock**); - CheckedBoolean getFreshBlock(AllocationEffort, BumpBlock**); - void doneFillingBlock(BumpBlock*); - void recycleBlock(BumpBlock*); - static bool fitsInBlock(BumpBlock*, size_t); - static BumpBlock* oversizeBlockFor(void* ptr); + CheckedBoolean borrowBlock(CopiedBlock**); + CheckedBoolean getFreshBlock(AllocationEffort, CopiedBlock**); + void doneFillingBlock(CopiedBlock*); + void recycleBlock(CopiedBlock*); + static bool fitsInBlock(CopiedBlock*, size_t); + static CopiedBlock* oversizeBlockFor(void* ptr); Heap* m_heap; - BumpBlock* m_currentBlock; + CopiedAllocator m_allocator; TinyBloomFilter m_toSpaceFilter; TinyBloomFilter m_oversizeFilter; - HashSet<BumpBlock*> m_toSpaceSet; + HashSet<CopiedBlock*> m_toSpaceSet; Mutex m_toSpaceLock; Mutex m_memoryStatsLock; @@ -112,12 +112,11 @@ private: ThreadCondition m_loanedBlocksCondition; size_t m_numberOfLoanedBlocks; - static const size_t s_blockSize = 64 * KB; static const size_t s_maxAllocationSize = 32 * KB; static const size_t s_pageSize = 4 * KB; static const size_t s_pageMask = ~(s_pageSize - 1); static const size_t s_initialBlockNum = 16; - static const size_t s_blockMask = ~(s_blockSize - 1); + static const size_t s_blockMask = ~(HeapBlock::s_blockSize - 1); }; } // namespace JSC diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h new file mode 100644 index 000000000..9a1f63cec --- /dev/null +++ 
b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CopiedSpaceInlineMethods_h +#define CopiedSpaceInlineMethods_h + +#include "CopiedBlock.h" +#include "CopiedSpace.h" +#include "Heap.h" +#include "HeapBlock.h" +#include "JSGlobalData.h" +#include <wtf/CheckedBoolean.h> + +namespace JSC { + +inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result) +{ + CopiedBlock* block = blockFor(ptr); + result = block; + return !m_toSpaceFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_toSpaceSet.contains(block); +} + +inline void CopiedSpace::pin(CopiedBlock* block) +{ + block->m_isPinned = true; +} + +inline void CopiedSpace::startedCopying() +{ + DoublyLinkedList<HeapBlock>* temp = m_fromSpace; + m_fromSpace = m_toSpace; + m_toSpace = temp; + + m_toSpaceFilter.reset(); + m_allocator.startedCopying(); + + m_totalMemoryUtilized = 0; + + ASSERT(!m_inCopyingPhase); + ASSERT(!m_numberOfLoanedBlocks); + m_inCopyingPhase = true; +} + +inline void CopiedSpace::recycleBlock(CopiedBlock* block) +{ + { + MutexLocker locker(m_heap->m_freeBlockLock); + m_heap->m_freeBlocks.push(block); + m_heap->m_numberOfFreeBlocks++; + } + + { + MutexLocker locker(m_loanedBlocksLock); + ASSERT(m_numberOfLoanedBlocks > 0); + m_numberOfLoanedBlocks--; + if (!m_numberOfLoanedBlocks) + m_loanedBlocksCondition.signal(); + } +} + +inline CheckedBoolean CopiedSpace::borrowBlock(CopiedBlock** outBlock) +{ + CopiedBlock* block = 0; + if (!getFreshBlock(AllocationMustSucceed, &block)) { + *outBlock = 0; + return false; + } + + ASSERT(m_inCopyingPhase); + MutexLocker locker(m_loanedBlocksLock); + m_numberOfLoanedBlocks++; + + ASSERT(block->m_offset == block->m_payload); + *outBlock = block; + return true; +} + +inline CheckedBoolean CopiedSpace::addNewBlock() +{ + CopiedBlock* block = 0; + if (!getFreshBlock(AllocationCanFail, &block)) + return false; + + m_toSpace->push(block); + m_toSpaceFilter.add(reinterpret_cast<Bits>(block)); + m_toSpaceSet.add(block); + m_allocator.resetCurrentBlock(block); + return true; +} + +inline CheckedBoolean CopiedSpace::allocateNewBlock(CopiedBlock** outBlock) +{ + PageAllocationAligned allocation = 
PageAllocationAligned::allocate(HeapBlock::s_blockSize, HeapBlock::s_blockSize, OSAllocator::JSGCHeapPages); + if (!static_cast<bool>(allocation)) { + *outBlock = 0; + return false; + } + + { + MutexLocker locker(m_memoryStatsLock); + m_totalMemoryAllocated += HeapBlock::s_blockSize; + } + + *outBlock = new (NotNull, allocation.base()) CopiedBlock(allocation); + return true; +} + +inline bool CopiedSpace::fitsInBlock(CopiedBlock* block, size_t bytes) +{ + return static_cast<char*>(block->m_offset) + bytes < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize && static_cast<char*>(block->m_offset) + bytes > block->m_offset; +} + +inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr) +{ + ASSERT(!m_heap->globalData()->isInitializingObject()); + + if (isOversize(bytes) || !m_allocator.fitsInCurrentBlock(bytes)) + return tryAllocateSlowCase(bytes, outPtr); + + *outPtr = m_allocator.allocate(bytes); + ASSERT(*outPtr); + return true; +} + +inline void* CopiedSpace::allocateFromBlock(CopiedBlock* block, size_t bytes) +{ + ASSERT(!isOversize(bytes)); + ASSERT(fitsInBlock(block, bytes)); + ASSERT(is8ByteAligned(block->m_offset)); + + void* ptr = block->m_offset; + ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize); + block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes)); + ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize); + + ASSERT(is8ByteAligned(ptr)); + return ptr; +} + +inline bool CopiedSpace::isOversize(size_t bytes) +{ + return bytes > s_maxAllocationSize; +} + +inline bool CopiedSpace::isPinned(void* ptr) +{ + return blockFor(ptr)->m_isPinned; +} + +inline CopiedBlock* CopiedSpace::oversizeBlockFor(void* ptr) +{ + return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_pageMask); +} + +inline CopiedBlock* CopiedSpace::blockFor(void* ptr) +{ + return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask); +} + +} // namespace JSC + +#endif diff --git a/Source/JavaScriptCore/heap/GCAssertions.h b/Source/JavaScriptCore/heap/GCAssertions.h new file mode 100644 index 000000000..f044df6f0 --- /dev/null +++ b/Source/JavaScriptCore/heap/GCAssertions.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef GCAssertions_h +#define GCAssertions_h + +#include "Assertions.h" + +#if ENABLE(GC_VALIDATION) +#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { \ + if (!(cell))\ + CRASH();\ + if (cell->unvalidatedStructure()->unvalidatedStructure() != cell->unvalidatedStructure()->unvalidatedStructure()->unvalidatedStructure())\ + CRASH();\ +} while (0) + +#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do {\ + ASSERT_GC_OBJECT_LOOKS_VALID(object); \ + if (!object->inherits(classInfo)) \ + CRASH();\ +} while (0) + +#else +#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { (void)cell; } while (0) +#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do { (void)object; (void)classInfo; } while (0) +#endif + +#if COMPILER_SUPPORTS(HAS_TRIVIAL_DESTRUCTOR) +#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass) COMPILE_ASSERT(__has_trivial_destructor(klass), klass##_has_trivial_destructor_check) +#else +#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass) +#endif + +#endif // GCAssertions_h diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp index 9f5094a58..1333c7b2c 100644 --- a/Source/JavaScriptCore/heap/Heap.cpp +++ b/Source/JavaScriptCore/heap/Heap.cpp @@ -21,8 +21,8 @@ #include "config.h" #include "Heap.h" -#include "BumpSpace.h" -#include "BumpSpaceInlineMethods.h" +#include "CopiedSpace.h" +#include "CopiedSpaceInlineMethods.h" #include "CodeBlock.h" #include "ConservativeRoots.h" #include "GCActivityCallback.h" @@ -77,7 +77,7 @@ struct GCTimer { } ~GCTimer() { - printf("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000); + dataLog("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000); } double m_time; double m_min; @@ -127,7 +127,7 @@ struct GCCounter { } ~GCCounter() { - printf("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max); + dataLog("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max); } const char* m_name; size_t m_count; @@ -345,7 +345,7 @@ Heap::~Heap() m_blockFreeingThreadShouldQuit = true; m_freeBlockCondition.broadcast(); } - waitForThreadCompletion(m_blockFreeingThread, 0); + waitForThreadCompletion(m_blockFreeingThread); // The destroy function must already have been called, so assert this. 
ASSERT(!m_globalData); @@ -381,8 +381,8 @@ void Heap::destroy() ASSERT(!size()); #if ENABLE(SIMPLE_HEAP_PROFILING) - m_slotVisitor.m_visitedTypeCounts.dump(stderr, "Visited Type Counts"); - m_destroyedTypeCounts.dump(stderr, "Destroyed Type Counts"); + m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts"); + m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts"); #endif releaseFreeBlocks(); @@ -407,10 +407,9 @@ void Heap::waitForRelativeTime(double relative) waitForRelativeTimeWhileHoldingLock(relative); } -void* Heap::blockFreeingThreadStartFunc(void* heap) +void Heap::blockFreeingThreadStartFunc(void* heap) { static_cast<Heap*>(heap)->blockFreeingThreadMain(); - return 0; } void Heap::blockFreeingThreadMain() diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h index 1d0ac5407..bcacee6d5 100644 --- a/Source/JavaScriptCore/heap/Heap.h +++ b/Source/JavaScriptCore/heap/Heap.h @@ -40,7 +40,7 @@ namespace JSC { - class BumpSpace; + class CopiedSpace; class CodeBlock; class GCActivityCallback; class GlobalCodeBlock; @@ -50,6 +50,7 @@ namespace JSC { class JSGlobalData; class JSValue; class LiveObjectIterator; + class LLIntOffsetsExtractor; class MarkedArgumentBuffer; class RegisterFile; class UString; @@ -95,8 +96,9 @@ namespace JSC { // true if an allocation or collection is in progress inline bool isBusy(); - MarkedAllocator& allocatorForObject(size_t bytes) { return m_objectSpace.allocatorFor(bytes); } - void* allocate(size_t); + MarkedAllocator& firstAllocatorWithoutDestructors() { return m_objectSpace.firstAllocator(); } + MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); } + MarkedAllocator& allocatorForObjectWithDestructor(size_t bytes) { return m_objectSpace.destructorAllocatorFor(bytes); } CheckedBoolean tryAllocateStorage(size_t, void**); CheckedBoolean tryReallocateStorage(void**, size_t, size_t); @@ -136,12 +138,17 @@ namespace JSC { void getConservativeRegisterRoots(HashSet<JSCell*>& roots); private: + friend class CodeBlock; + friend class LLIntOffsetsExtractor; friend class MarkedSpace; friend class MarkedAllocator; friend class MarkedBlock; - friend class BumpSpace; + friend class CopiedSpace; friend class SlotVisitor; - friend class CodeBlock; + template<typename T> friend void* allocateCell(Heap&); + + void* allocateWithDestructor(size_t); + void* allocateWithoutDestructor(size_t); size_t waterMark(); size_t highWaterMark(); @@ -183,7 +190,7 @@ namespace JSC { void waitForRelativeTimeWhileHoldingLock(double relative); void waitForRelativeTime(double relative); void blockFreeingThreadMain(); - static void* blockFreeingThreadStartFunc(void* heap); + static void blockFreeingThreadStartFunc(void* heap); const HeapSize m_heapSize; const size_t m_minBytesPerCycle; @@ -193,7 +200,7 @@ namespace JSC { OperationInProgress m_operationInProgress; MarkedSpace m_objectSpace; - BumpSpace m_storageSpace; + CopiedSpace m_storageSpace; DoublyLinkedList<HeapBlock> m_freeBlocks; size_t m_numberOfFreeBlocks; @@ -334,10 +341,16 @@ namespace JSC { return forEachProtectedCell(functor); } - inline void* Heap::allocate(size_t bytes) + inline void* Heap::allocateWithDestructor(size_t bytes) + { + ASSERT(isValidAllocation(bytes)); + return m_objectSpace.allocateWithDestructor(bytes); + } + + inline void* Heap::allocateWithoutDestructor(size_t bytes) { ASSERT(isValidAllocation(bytes)); - return m_objectSpace.allocate(bytes); + return 
m_objectSpace.allocateWithoutDestructor(bytes); } inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr) diff --git a/Source/JavaScriptCore/heap/HeapBlock.h b/Source/JavaScriptCore/heap/HeapBlock.h index b0ecb2059..591520d2b 100644 --- a/Source/JavaScriptCore/heap/HeapBlock.h +++ b/Source/JavaScriptCore/heap/HeapBlock.h @@ -28,6 +28,7 @@ #include <wtf/DoublyLinkedList.h> #include <wtf/PageAllocationAligned.h> +#include <wtf/StdLibExtras.h> namespace JSC { @@ -47,6 +48,8 @@ public: HeapBlock* m_prev; HeapBlock* m_next; PageAllocationAligned m_allocation; + + static const size_t s_blockSize = 64 * KB; }; } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp index 9a3092396..129a7ab67 100644 --- a/Source/JavaScriptCore/heap/MarkStack.cpp +++ b/Source/JavaScriptCore/heap/MarkStack.cpp @@ -26,8 +26,8 @@ #include "config.h" #include "MarkStack.h" -#include "BumpSpace.h" -#include "BumpSpaceInlineMethods.h" +#include "CopiedSpace.h" +#include "CopiedSpaceInlineMethods.h" #include "ConservativeRoots.h" #include "Heap.h" #include "Options.h" @@ -226,16 +226,15 @@ void MarkStackThreadSharedData::markingThreadMain() slotVisitor.drainFromShared(SlotVisitor::SlaveDrain); } -void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared) +void MarkStackThreadSharedData::markingThreadStartFunc(void* shared) { static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain(); - return 0; } #endif MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData) : m_globalData(globalData) - , m_bumpSpace(&globalData->heap.m_storageSpace) + , m_copiedSpace(&globalData->heap.m_storageSpace) , m_sharedMarkStack(m_segmentAllocator) , m_numberOfActiveParallelMarkers(0) , m_parallelMarkersShouldExit(false) @@ -258,7 +257,7 @@ MarkStackThreadSharedData::~MarkStackThreadSharedData() m_markingCondition.broadcast(); } for (unsigned i = 0; i < m_markingThreads.size(); ++i) - waitForThreadCompletion(m_markingThreads[i], 0); + waitForThreadCompletion(m_markingThreads[i]); #endif } @@ -304,7 +303,7 @@ ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell #endif ASSERT(Heap::isMarked(cell)); - + if (isJSString(cell)) { JSString::visitChildren(const_cast<JSCell*>(cell), visitor); return; @@ -402,7 +401,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode) while (true) { // Did we reach termination? if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) { - // Let any sleeping slaves know it's time for them to give their private BumpBlocks back + // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back m_shared.m_markingCondition.broadcast(); return; } @@ -459,32 +458,32 @@ void MarkStack::mergeOpaqueRoots() void SlotVisitor::startCopying() { ASSERT(!m_copyBlock); - if (!m_shared.m_bumpSpace->borrowBlock(&m_copyBlock)) + if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock)) CRASH(); } void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes) { - if (BumpSpace::isOversize(bytes)) { - m_shared.m_bumpSpace->pin(BumpSpace::oversizeBlockFor(ptr)); + if (CopiedSpace::isOversize(bytes)) { + m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr)); return 0; } - if (m_shared.m_bumpSpace->isPinned(ptr)) + if (m_shared.m_copiedSpace->isPinned(ptr)) return 0; // The only time it's possible to have a null copy block is if we have just started copying. 
if (!m_copyBlock) startCopying(); - if (!BumpSpace::fitsInBlock(m_copyBlock, bytes)) { + if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) { // We don't need to lock across these two calls because the master thread won't // call doneCopying() because this thread is considered active. - m_shared.m_bumpSpace->doneFillingBlock(m_copyBlock); - if (!m_shared.m_bumpSpace->borrowBlock(&m_copyBlock)) + m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock); + if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock)) CRASH(); } - return BumpSpace::allocateFromBlock(m_copyBlock, bytes); + return CopiedSpace::allocateFromBlock(m_copyBlock, bytes); } void SlotVisitor::copy(void** ptr, size_t bytes) @@ -524,7 +523,7 @@ void SlotVisitor::doneCopying() if (!m_copyBlock) return; - m_shared.m_bumpSpace->doneFillingBlock(m_copyBlock); + m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock); m_copyBlock = 0; } diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h index 6923cdd8a..0695b1b32 100644 --- a/Source/JavaScriptCore/heap/MarkStack.h +++ b/Source/JavaScriptCore/heap/MarkStack.h @@ -26,7 +26,7 @@ #ifndef MarkStack_h #define MarkStack_h -#include "BumpSpace.h" +#include "CopiedSpace.h" #include "HandleTypes.h" #include "Options.h" #include "JSValue.h" @@ -178,11 +178,11 @@ namespace JSC { #if ENABLE(PARALLEL_GC) void markingThreadMain(); - static void* markingThreadStartFunc(void* heap); + static void markingThreadStartFunc(void* heap); #endif JSGlobalData* m_globalData; - BumpSpace* m_bumpSpace; + CopiedSpace* m_copiedSpace; MarkStackSegmentAllocator m_segmentAllocator; diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp index 8239fbaed..eb6d2c691 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp +++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp @@ -97,11 +97,11 @@ MarkedBlock* MarkedAllocator::allocateBlock(AllocationEffort allocationEffort) block = 0; } if (block) - block = MarkedBlock::recycle(block, m_heap, m_cellSize); + block = MarkedBlock::recycle(block, m_heap, m_cellSize, m_cellsNeedDestruction); else if (allocationEffort == AllocationCanFail) return 0; else - block = MarkedBlock::create(m_heap, m_cellSize); + block = MarkedBlock::create(m_heap, m_cellSize, m_cellsNeedDestruction); m_markedSpace->didAddBlock(block); diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h index 5644c691b..1c6af77a2 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.h +++ b/Source/JavaScriptCore/heap/MarkedAllocator.h @@ -8,6 +8,7 @@ namespace JSC { class Heap; class MarkedSpace; +class LLIntOffsetsExtractor; namespace DFG { class SpeculativeJIT; @@ -22,6 +23,7 @@ public: void reset(); void zapFreeList(); size_t cellSize() { return m_cellSize; } + bool cellsNeedDestruction() { return m_cellsNeedDestruction; } void* allocate(); Heap* heap() { return m_heap; } @@ -29,11 +31,11 @@ public: void addBlock(MarkedBlock*); void removeBlock(MarkedBlock*); - void setHeap(Heap* heap) { m_heap = heap; } - void setCellSize(size_t cellSize) { m_cellSize = cellSize; } - void setMarkedSpace(MarkedSpace* space) { m_markedSpace = space; } + void init(Heap*, MarkedSpace*, size_t cellSize, bool cellsNeedDestruction); private: + friend class LLIntOffsetsExtractor; + JS_EXPORT_PRIVATE void* allocateSlowCase(); void* tryAllocate(); void* tryAllocateHelper(); @@ -43,6 +45,7 @@ private: MarkedBlock* m_currentBlock; DoublyLinkedList<HeapBlock> m_blockList; size_t m_cellSize; + 
bool m_cellsNeedDestruction; Heap* m_heap; MarkedSpace* m_markedSpace; }; @@ -51,11 +54,20 @@ inline MarkedAllocator::MarkedAllocator() : m_firstFreeCell(0) , m_currentBlock(0) , m_cellSize(0) + , m_cellsNeedDestruction(true) , m_heap(0) , m_markedSpace(0) { } - + +inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, bool cellsNeedDestruction) +{ + m_heap = heap; + m_markedSpace = markedSpace; + m_cellSize = cellSize; + m_cellsNeedDestruction = cellsNeedDestruction; +} + inline void* MarkedAllocator::allocate() { MarkedBlock::FreeCell* firstFreeCell = m_firstFreeCell; diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp index dd9233300..75c21e7dd 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.cpp +++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp @@ -32,17 +32,17 @@ namespace JSC { -MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize) +MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction) { PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages); if (!static_cast<bool>(allocation)) CRASH(); - return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize); + return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction); } -MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize) +MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction) { - return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize); + return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction); } void MarkedBlock::destroy(MarkedBlock* block) @@ -50,10 +50,11 @@ void MarkedBlock::destroy(MarkedBlock* block) block->m_allocation.deallocate(); } -MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize) +MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction) : HeapBlock(allocation) , m_atomsPerCell((cellSize + atomSize - 1) / atomSize) , m_endAtom(atomsPerBlock - m_atomsPerCell + 1) + , m_cellsNeedDestruction(cellsNeedDestruction) , m_state(New) // All cells start out unmarked. , m_heap(heap) { @@ -70,16 +71,16 @@ inline void MarkedBlock::callDestructor(JSCell* cell) #if ENABLE(SIMPLE_HEAP_PROFILING) m_heap->m_destroyedTypeCounts.countVPtr(vptr); #endif - if (cell->classInfo() != &JSFinalObject::s_info) - cell->methodTable()->destroy(cell); + cell->methodTable()->destroy(cell); cell->zap(); } -template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode> +template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded> MarkedBlock::FreeCell* MarkedBlock::specializedSweep() { ASSERT(blockState != Allocated && blockState != FreeListed); + ASSERT(destructorCallNeeded || sweepMode != SweepOnly); // This produces a free list that is ordered in reverse through the block. 
// This is fine, since the allocation code makes no assumptions about the @@ -93,7 +94,7 @@ MarkedBlock::FreeCell* MarkedBlock::specializedSweep() if (blockState == Zapped && !cell->isZapped()) continue; - if (blockState != New) + if (destructorCallNeeded && blockState != New) callDestructor(cell); if (sweepMode == SweepToFreeList) { @@ -111,10 +112,21 @@ MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode) { HEAP_LOG_BLOCK_STATE_TRANSITION(this); + if (sweepMode == SweepOnly && !m_cellsNeedDestruction) + return 0; + + if (m_cellsNeedDestruction) + return sweepHelper<true>(sweepMode); + return sweepHelper<false>(sweepMode); +} + +template<bool destructorCallNeeded> +MarkedBlock::FreeCell* MarkedBlock::sweepHelper(SweepMode sweepMode) +{ switch (m_state) { case New: ASSERT(sweepMode == SweepToFreeList); - return specializedSweep<New, SweepToFreeList>(); + return specializedSweep<New, SweepToFreeList, destructorCallNeeded>(); case FreeListed: // Happens when a block transitions to fully allocated. ASSERT(sweepMode == SweepToFreeList); @@ -124,12 +136,12 @@ MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode) return 0; case Marked: return sweepMode == SweepToFreeList - ? specializedSweep<Marked, SweepToFreeList>() - : specializedSweep<Marked, SweepOnly>(); + ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>() + : specializedSweep<Marked, SweepOnly, destructorCallNeeded>(); case Zapped: return sweepMode == SweepToFreeList - ? specializedSweep<Zapped, SweepToFreeList>() - : specializedSweep<Zapped, SweepOnly>(); + ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>() + : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>(); } ASSERT_NOT_REACHED(); diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h index 0a4ebe47e..5f70b69d4 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.h +++ b/Source/JavaScriptCore/heap/MarkedBlock.h @@ -26,6 +26,7 @@ #include "HeapBlock.h" #include <wtf/Bitmap.h> +#include <wtf/DataLog.h> #include <wtf/DoublyLinkedList.h> #include <wtf/HashFunctions.h> #include <wtf/PageAllocationAligned.h> @@ -36,9 +37,11 @@ #define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0 #if HEAP_LOG_BLOCK_STATE_TRANSITIONS -#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \ - printf("%s:%d %s: block %s = %p, %d\n", \ - __FILE__, __LINE__, __FUNCTION__, #block, (block), (block)->m_state); \ +#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \ + dataLog( \ + "%s:%d %s: block %s = %p, %d\n", \ + __FILE__, __LINE__, __FUNCTION__, \ + #block, (block), (block)->m_state); \ } while (false) #else #define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0) @@ -89,8 +92,8 @@ namespace JSC { void returnValue() { } }; - static MarkedBlock* create(Heap*, size_t cellSize); - static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize); + static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction); + static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction); static void destroy(MarkedBlock*); static bool isAtomAligned(const void*); @@ -115,6 +118,7 @@ namespace JSC { bool markCountIsZero(); // Faster than markCount(). size_t cellSize(); + bool cellsNeedDestruction(); size_t size(); size_t capacity(); @@ -159,14 +163,15 @@ namespace JSC { static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two. 
enum BlockState { New, FreeListed, Allocated, Marked, Zapped }; + template<bool destructorCallNeeded> FreeCell* sweepHelper(SweepMode = SweepOnly); typedef char Atom[atomSize]; - MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize); + MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction); Atom* atoms(); size_t atomNumber(const void*); void callDestructor(JSCell*); - template<BlockState, SweepMode> FreeCell* specializedSweep(); + template<BlockState, SweepMode, bool destructorCallNeeded> FreeCell* specializedSweep(); #if ENABLE(GGC) CardSet<bytesPerCard, blockSize> m_cards; @@ -179,6 +184,7 @@ namespace JSC { #else WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks; #endif + bool m_cellsNeedDestruction; BlockState m_state; Heap* m_heap; }; @@ -243,6 +249,11 @@ namespace JSC { return m_atomsPerCell * atomSize; } + inline bool MarkedBlock::cellsNeedDestruction() + { + return m_cellsNeedDestruction; + } + inline size_t MarkedBlock::size() { return markCount() * cellSize(); diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp index 87dc0493d..bf839011d 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.cpp +++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp @@ -36,15 +36,13 @@ MarkedSpace::MarkedSpace(Heap* heap) , m_heap(heap) { for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) { - allocatorFor(cellSize).setCellSize(cellSize); - allocatorFor(cellSize).setHeap(heap); - allocatorFor(cellSize).setMarkedSpace(this); + allocatorFor(cellSize).init(heap, this, cellSize, false); + destructorAllocatorFor(cellSize).init(heap, this, cellSize, true); } for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) { - allocatorFor(cellSize).setCellSize(cellSize); - allocatorFor(cellSize).setHeap(heap); - allocatorFor(cellSize).setMarkedSpace(this); + allocatorFor(cellSize).init(heap, this, cellSize, false); + destructorAllocatorFor(cellSize).init(heap, this, cellSize, true); } } @@ -53,20 +51,28 @@ void MarkedSpace::resetAllocators() m_waterMark = 0; m_nurseryWaterMark = 0; - for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) + for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) { allocatorFor(cellSize).reset(); + destructorAllocatorFor(cellSize).reset(); + } - for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) + for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) { allocatorFor(cellSize).reset(); + destructorAllocatorFor(cellSize).reset(); + } } void MarkedSpace::canonicalizeCellLivenessData() { - for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) + for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) { allocatorFor(cellSize).zapFreeList(); + destructorAllocatorFor(cellSize).zapFreeList(); + } - for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) + for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) { allocatorFor(cellSize).zapFreeList(); + destructorAllocatorFor(cellSize).zapFreeList(); + } } @@ -107,7 +113,7 @@ inline void TakeIfUnmarked::operator()(MarkedBlock* block) if (!block->markCountIsZero()) return; - m_markedSpace->allocatorFor(block->cellSize()).removeBlock(block); + m_markedSpace->allocatorFor(block).removeBlock(block); 
m_empties.append(block); } diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h index 21a0b48de..cfcf3f8dc 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.h +++ b/Source/JavaScriptCore/heap/MarkedSpace.h @@ -41,6 +41,7 @@ namespace JSC { class Heap; class JSCell; class LiveObjectIterator; +class LLIntOffsetsExtractor; class WeakGCHandle; class SlotVisitor; @@ -51,8 +52,12 @@ public: MarkedSpace(Heap*); + MarkedAllocator& firstAllocator(); MarkedAllocator& allocatorFor(size_t); - void* allocate(size_t); + MarkedAllocator& allocatorFor(MarkedBlock*); + MarkedAllocator& destructorAllocatorFor(size_t); + void* allocateWithDestructor(size_t); + void* allocateWithoutDestructor(size_t); void resetAllocators(); @@ -76,6 +81,8 @@ public: void didConsumeFreeList(MarkedBlock*); private: + friend class LLIntOffsetsExtractor; + // [ 32... 256 ] static const size_t preciseStep = MarkedBlock::atomSize; static const size_t preciseCutoff = 256; @@ -86,8 +93,14 @@ private: static const size_t impreciseCutoff = maxCellSize; static const size_t impreciseCount = impreciseCutoff / impreciseStep; - FixedArray<MarkedAllocator, preciseCount> m_preciseSizeClasses; - FixedArray<MarkedAllocator, impreciseCount> m_impreciseSizeClasses; + struct Subspace { + FixedArray<MarkedAllocator, preciseCount> preciseAllocators; + FixedArray<MarkedAllocator, impreciseCount> impreciseAllocators; + }; + + Subspace m_destructorSpace; + Subspace m_normalSpace; + size_t m_waterMark; size_t m_nurseryWaterMark; Heap* m_heap; @@ -120,27 +133,54 @@ template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forE return forEachCell(functor); } +inline MarkedAllocator& MarkedSpace::firstAllocator() +{ + return m_normalSpace.preciseAllocators[0]; +} + inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes) { ASSERT(bytes && bytes <= maxCellSize); if (bytes <= preciseCutoff) - return m_preciseSizeClasses[(bytes - 1) / preciseStep]; - return m_impreciseSizeClasses[(bytes - 1) / impreciseStep]; + return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep]; + return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; +} + +inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block) +{ + if (block->cellsNeedDestruction()) + return destructorAllocatorFor(block->cellSize()); + return allocatorFor(block->cellSize()); } -inline void* MarkedSpace::allocate(size_t bytes) +inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes) +{ + ASSERT(bytes && bytes <= maxCellSize); + if (bytes <= preciseCutoff) + return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep]; + return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; +} + +inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes) { return allocatorFor(bytes).allocate(); } +inline void* MarkedSpace::allocateWithDestructor(size_t bytes) +{ + return destructorAllocatorFor(bytes).allocate(); +} + template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor) { for (size_t i = 0; i < preciseCount; ++i) { - m_preciseSizeClasses[i].forEachBlock(functor); + m_normalSpace.preciseAllocators[i].forEachBlock(functor); + m_destructorSpace.preciseAllocators[i].forEachBlock(functor); } for (size_t i = 0; i < impreciseCount; ++i) { - m_impreciseSizeClasses[i].forEachBlock(functor); + m_normalSpace.impreciseAllocators[i].forEachBlock(functor); + m_destructorSpace.impreciseAllocators[i].forEachBlock(functor); } return 
diff --git a/Source/JavaScriptCore/heap/PassWeak.h b/Source/JavaScriptCore/heap/PassWeak.h
new file mode 100644
index 000000000..b7aa7b10d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/PassWeak.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PassWeak_h
+#define PassWeak_h
+
+#include "Assertions.h"
+#include "Handle.h"
+#include "NullPtr.h"
+#include "TypeTraits.h"
+
+namespace JSC {
+
+template<typename T> class Weak;
+template<typename T> class PassWeak;
+template<typename T> PassWeak<T> adoptWeak(HandleSlot);
+
+template<typename T> class PassWeak : public Handle<T> {
+    using Handle<T>::slot;
+    using Handle<T>::setSlot;
+
+public:
+    typedef typename Handle<T>::ExternalType ExternalType;
+
+    PassWeak() : Handle<T>() { }
+    PassWeak(std::nullptr_t) : Handle<T>() { }
+
+    PassWeak(JSGlobalData& globalData, ExternalType externalType = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
+        : Handle<T>(globalData.heap.handleHeap()->allocate())
+    {
+        HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
+        JSValue value = HandleTypes<T>::toJSValue(externalType);
+        HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+        *slot() = value;
+    }
+
+    // It somewhat breaks the type system to allow transfer of ownership out of
+    // a const PassWeak. However, it makes it much easier to work with PassWeak
+    // temporaries, and we don't have a need to use real const PassWeaks anyway.
+    PassWeak(const PassWeak& o) : Handle<T>(o.leakHandle()) { }
+    template<typename U> PassWeak(const PassWeak<U>& o) : Handle<T>(o.leakHandle()) { }
+
+    ~PassWeak()
+    {
+        if (!slot())
+            return;
+        HandleHeap::heapFor(slot())->deallocate(slot());
+        setSlot(0);
+    }
+
+    ExternalType get() const { return HandleTypes<T>::getFromSlot(slot()); }
+
+    HandleSlot leakHandle() const WARN_UNUSED_RETURN;
+
+private:
+    friend PassWeak adoptWeak<T>(HandleSlot);
+
+    explicit PassWeak(HandleSlot slot) : Handle<T>(slot) { }
+};
+
+template<typename T> inline HandleSlot PassWeak<T>::leakHandle() const
+{
+    HandleSlot slot = this->slot();
+    const_cast<PassWeak<T>*>(this)->setSlot(0);
+    return slot;
+}
+
+template<typename T> PassWeak<T> adoptWeak(HandleSlot slot)
+{
+    return PassWeak<T>(slot);
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const PassWeak<U>& b)
+{
+    return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const Weak<U>& b)
+{
+    return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const Weak<T>& a, const PassWeak<U>& b)
+{
+    return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, U* b)
+{
+    return a.get() == b;
+}
+
+template<typename T, typename U> inline bool operator==(T* a, const PassWeak<U>& b)
+{
+    return a == b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const PassWeak<U>& b)
+{
+    return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const Weak<U>& b)
+{
+    return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const Weak<T>& a, const PassWeak<U>& b)
+{
+    return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, U* b)
+{
+    return a.get() != b;
+}
+
+template<typename T, typename U> inline bool operator!=(T* a, const PassWeak<U>& b)
+{
+    return a != b.get();
+}
+
+} // namespace JSC
+
+#endif // PassWeak_h
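PassWeak's copy constructor hands the underlying handle slot over instead of sharing it, in the style of WTF's PassRefPtr: a handle is allocated once and then passed through returns and assignments without touching the handle heap again. A compilable toy model of that transfer, with a plain pointer standing in for HandleSlot (PassOwner is a made-up name, not a JSC type):

    #include <cassert>

    template<typename T> class PassOwner {
    public:
        explicit PassOwner(T* slot) : m_slot(slot) { }
        // "Copying" transfers the slot, as PassWeak's copy constructor
        // does via leakHandle().
        PassOwner(const PassOwner& o) : m_slot(o.leak()) { }
        T* leak() const
        {
            T* slot = m_slot;
            m_slot = nullptr;
            return slot;
        }
    private:
        mutable T* m_slot;
    };

    int main()
    {
        int slot = 42;
        PassOwner<int> a(&slot);
        PassOwner<int> b(a);          // a gives its slot to b
        assert(a.leak() == nullptr);  // a is now empty
        assert(b.leak() == &slot);    // b owns the one slot
        return 0;
    }

The mutable member and const copy constructor are the same type-system compromise the comment in PassWeak.h acknowledges: they make temporaries convenient at the cost of a const object that can be emptied.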
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index e49a9a637..6584db703 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -26,7 +26,7 @@
 #ifndef SlotVisitor_h
 #define SlotVisitor_h
 
-#include "BumpSpace.h"
+#include "CopiedSpace.h"
 #include "MarkStack.h"
 
 namespace JSC {
@@ -78,7 +78,7 @@ private:
             donateSlow();
     }
 
-    BumpBlock* m_copyBlock;
+    CopiedBlock* m_copyBlock;
 };
 
 inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared)
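The SlotVisitor above keeps a per-visitor CopiedBlock (m_copyBlock), and the donateSlow() call suggests a fast/slow split around the current block. A rough sketch of a bump-allocate fast path with a refill slow path, under the assumption that a copy block is a bump-allocated byte buffer (CopyBlock, copyAllocate, and the 32KB block size are guesses for illustration, not the real CopiedBlock layout):

    #include <cstddef>
    #include <cstdlib>

    struct CopyBlock {
        char* offset; // next free byte
        char* end;    // one past the last payload byte
    };

    // Slow path: grab a fresh block and satisfy the request from it.
    // (Leaks the old block; a real collector would hand it back to the heap.)
    void* allocateSlow(CopyBlock*& block, std::size_t bytes)
    {
        const std::size_t blockSize = 32 * 1024; // assumed block size
        char* payload = static_cast<char*>(std::malloc(blockSize));
        block = new CopyBlock{payload + bytes, payload + blockSize};
        return payload;
    }

    // Fast path: bump the offset within the current block.
    void* copyAllocate(CopyBlock*& block, std::size_t bytes)
    {
        if (block && block->offset + bytes <= block->end) {
            void* result = block->offset;
            block->offset += bytes;
            return result;
        }
        return allocateSlow(block, bytes);
    }

    int main()
    {
        CopyBlock* block = nullptr;
        void* a = copyAllocate(block, 64); // slow path: acquires the first block
        void* b = copyAllocate(block, 64); // fast path: bumps within it
        return (a && b) ? 0 : 1;
    }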
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
index f0c028d71..96fe1b58c 100644
--- a/Source/JavaScriptCore/heap/Weak.h
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -30,11 +30,14 @@
 #include "Handle.h"
 #include "HandleHeap.h"
 #include "JSGlobalData.h"
+#include "PassWeak.h"
 
 namespace JSC {
 
 // A weakly referenced handle that becomes 0 when the value it points to is garbage collected.
 template <typename T> class Weak : public Handle<T> {
+    WTF_MAKE_NONCOPYABLE(Weak);
+
     using Handle<T>::slot;
     using Handle<T>::setSlot;
 
@@ -46,11 +49,18 @@ public:
     {
     }
 
-    Weak(JSGlobalData& globalData, ExternalType value = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
+    Weak(std::nullptr_t)
+        : Handle<T>()
+    {
+    }
+
+    Weak(JSGlobalData& globalData, ExternalType externalType = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
         : Handle<T>(globalData.heap.handleHeap()->allocate())
     {
         HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
-        set(value);
+        JSValue value = HandleTypes<T>::toJSValue(externalType);
+        HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+        *slot() = value;
     }
 
     enum AdoptTag { Adopt };
@@ -59,23 +69,7 @@ public:
     {
         validateCell(get());
     }
-
-    Weak(const Weak& other)
-        : Handle<T>()
-    {
-        if (!other.slot())
-            return;
-        setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
-    }
 
-    template <typename U> Weak(const Weak<U>& other)
-        : Handle<T>()
-    {
-        if (!other.slot())
-            return;
-        setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
-    }
-
     enum HashTableDeletedValueTag { HashTableDeletedValue };
     bool isHashTableDeletedValue() const { return slot() == hashTableDeletedValue(); }
     Weak(HashTableDeletedValueTag)
@@ -83,6 +77,11 @@ public:
     {
     }
 
+    template<typename U> Weak(const PassWeak<U>& other)
+        : Handle<T>(other.leakHandle())
+    {
+    }
+
     ~Weak()
     {
         clear();
@@ -93,8 +92,12 @@ public:
         Handle<T>::swap(other);
     }
 
+    Weak& operator=(const PassWeak<T>&);
+
     ExternalType get() const { return HandleTypes<T>::getFromSlot(slot()); }
 
+    PassWeak<T> release() { PassWeak<T> tmp = adoptWeak<T>(slot()); setSlot(0); return tmp; }
+
     void clear()
     {
         if (!slot())
@@ -103,32 +106,6 @@ public:
         setSlot(0);
     }
 
-    void set(JSGlobalData& globalData, ExternalType value, WeakHandleOwner* weakOwner = 0, void* context = 0)
-    {
-        if (!slot()) {
-            setSlot(globalData.heap.handleHeap()->allocate());
-            HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
-        }
-        ASSERT(HandleHeap::heapFor(slot())->hasWeakOwner(slot(), weakOwner));
-        set(value);
-    }
-
-    template <typename U> Weak& operator=(const Weak<U>& other)
-    {
-        clear();
-        if (other.slot())
-            setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
-        return *this;
-    }
-
-    Weak& operator=(const Weak& other)
-    {
-        clear();
-        if (other.slot())
-            setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
-        return *this;
-    }
-
     HandleSlot leakHandle()
     {
         ASSERT(HandleHeap::heapFor(slot())->hasFinalizer(slot()));
@@ -139,14 +116,6 @@ public:
 
 private:
     static HandleSlot hashTableDeletedValue() { return reinterpret_cast<HandleSlot>(-1); }
-
-    void set(ExternalType externalType)
-    {
-        ASSERT(slot());
-        JSValue value = HandleTypes<T>::toJSValue(externalType);
-        HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
-        *slot() = value;
-    }
 };
 
 template<class T> inline void swap(Weak<T>& a, Weak<T>& b)
@@ -154,6 +123,13 @@ template<class T> inline void swap(Weak<T>& a, Weak<T>& b)
     a.swap(b);
 }
 
+template<typename T> inline Weak<T>& Weak<T>::operator=(const PassWeak<T>& o)
+{
+    clear();
+    setSlot(o.leakHandle());
+    return *this;
+}
+
 } // namespace JSC
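With WTF_MAKE_NONCOPYABLE in place, a Weak can no longer be duplicated implicitly; ownership leaves only through release(), which adopts the slot into a PassWeak, and enters through the PassWeak constructor or operator=. A compilable toy model of that discipline, again with raw pointers standing in for handle slots (Owner and PassOwner are illustrative names, not the JSC classes):

    #include <cassert>

    template<typename T> struct PassOwner { // stand-in for PassWeak
        explicit PassOwner(T* slot = nullptr) : m_slot(slot) { }
        PassOwner(const PassOwner& o) : m_slot(o.leak()) { }
        T* leak() const
        {
            T* slot = m_slot;
            m_slot = nullptr;
            return slot;
        }
        mutable T* m_slot;
    };

    template<typename T> class Owner { // stand-in for Weak
    public:
        Owner() : m_slot(nullptr) { }
        Owner(const Owner&) = delete;           // models WTF_MAKE_NONCOPYABLE(Weak)
        Owner& operator=(const PassOwner<T>& o) // models Weak::operator=(const PassWeak&)
        {
            m_slot = o.leak();
            return *this;
        }
        PassOwner<T> release()                  // models Weak::release()
        {
            PassOwner<T> tmp(m_slot);
            m_slot = nullptr;
            return tmp;
        }
        T* get() const { return m_slot; }
    private:
        T* m_slot;
    };

    int main()
    {
        int slot = 7;
        Owner<int> owner;
        owner = PassOwner<int>(&slot);        // ownership moves in
        PassOwner<int> out = owner.release(); // ...and explicitly back out
        assert(!owner.get());                 // the owner gave up its slot
        assert(out.leak() == &slot);
        return 0;
    }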
 
 namespace WTF {
 
@@ -162,7 +138,23 @@ template<typename T> struct VectorTraits<JSC::Weak<T> > : SimpleClassVectorTrait
     static const bool canCompareWithMemcmp = false;
 };
 
-template<typename P> struct HashTraits<JSC::Weak<P> > : SimpleClassHashTraits<JSC::Weak<P> > { };
+template<typename T> struct HashTraits<JSC::Weak<T> > : SimpleClassHashTraits<JSC::Weak<T> > {
+    typedef JSC::Weak<T> StorageType;
+
+    typedef std::nullptr_t EmptyValueType;
+    static EmptyValueType emptyValue() { return nullptr; }
+
+    typedef JSC::PassWeak<T> PassInType;
+    static void store(PassInType value, StorageType& storage) { storage = value; }
+
+    typedef JSC::PassWeak<T> PassOutType;
+    static PassOutType passOut(StorageType& value) { return value.release(); }
+    static PassOutType passOut(EmptyValueType) { return PassOutType(); }
+
+    typedef typename StorageType::ExternalType PeekType;
+    static PeekType peek(const StorageType& value) { return value.get(); }
+    static PeekType peek(EmptyValueType) { return PeekType(); }
+};
 
 }
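The HashTraits specialization above teaches WTF's hash tables to move Weak values rather than copy them: values enter storage as a PassWeak (store), leave as a PassWeak via release() (passOut), and are inspected without any ownership transfer (peek). The same three-part protocol, modeled on the stand-in types from the sketches above so it compiles on its own (OwnerTraits is a made-up name, not the WTF machinery):

    #include <cassert>

    template<typename T> struct PassOwner { // stand-in for PassWeak
        explicit PassOwner(T* slot = nullptr) : m_slot(slot) { }
        PassOwner(const PassOwner& o) : m_slot(o.leak()) { }
        T* leak() const
        {
            T* slot = m_slot;
            m_slot = nullptr;
            return slot;
        }
        mutable T* m_slot;
    };

    template<typename T> struct Owner { // stand-in for Weak
        Owner() : m_slot(nullptr) { }
        Owner(const Owner&) = delete;
        T* get() const { return m_slot; }
        T* m_slot;
    };

    template<typename T> struct OwnerTraits { // mirrors store/passOut/peek
        static void store(const PassOwner<T>& in, Owner<T>& storage)
        {
            storage.m_slot = in.leak();       // take ownership on the way in
        }
        static PassOwner<T> passOut(Owner<T>& storage)
        {
            PassOwner<T> out(storage.m_slot); // give ownership on the way out
            storage.m_slot = nullptr;
            return out;
        }
        static T* peek(const Owner<T>& storage) { return storage.get(); } // no transfer
    };

    int main()
    {
        int cell = 5;
        Owner<int> storage;
        OwnerTraits<int>::store(PassOwner<int>(&cell), storage);
        assert(OwnerTraits<int>::peek(storage) == &cell); // peek leaves ownership alone
        PassOwner<int> taken = OwnerTraits<int>::passOut(storage);
        assert(!storage.get());                           // passOut emptied the slot
        return taken.leak() == &cell ? 0 : 1;
    }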