path: root/chromium/v8/src/heap/spaces.cc
author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-09-18 14:34:04 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-10-04 11:15:27 +0000
commit     e6430e577f105ad8813c92e75c54660c4985026e (patch)
tree       88115e5d1fb471fea807111924dcccbeadbf9e4f /chromium/v8/src/heap/spaces.cc
parent     53d399fe6415a96ea6986ec0d402a9c07da72453 (diff)
download   qtwebengine-chromium-e6430e577f105ad8813c92e75c54660c4985026e.tar.gz
BASELINE: Update Chromium to 61.0.3163.99
Change-Id: I8452f34574d88ca2b27af9bd56fc9ff3f16b1367
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/spaces.cc')
-rw-r--r--  chromium/v8/src/heap/spaces.cc   308
1 file changed, 214 insertions(+), 94 deletions(-)
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 71e1b60be97..6f4546c8169 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -123,8 +123,10 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new base::VirtualMemory(
- requested, Max(kCodeRangeAreaAlignment,
- static_cast<size_t>(base::OS::AllocateAlignment())));
+ requested,
+ Max(kCodeRangeAreaAlignment,
+ static_cast<size_t>(base::OS::AllocateAlignment())),
+ base::OS::GetRandomMmapAddr());
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
@@ -416,7 +418,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// because the memory chunk can be in the queue of a sweeper task.
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
- return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+ return !chunk->InNewSpace() || mc == nullptr ||
!mc->sweeper().sweeping_in_progress();
}
@@ -460,23 +462,29 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
}
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
+ void* hint,
base::VirtualMemory* controller) {
- base::VirtualMemory reservation(size, alignment);
+ base::VirtualMemory reservation(size, alignment, hint);
- if (!reservation.IsReserved()) return NULL;
- size_.Increment(reservation.size());
- Address base =
+ if (!reservation.IsReserved()) return nullptr;
+ const Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
+ if (base + size != reservation.end()) {
+ const Address unused_start = RoundUp(base + size, GetCommitPageSize());
+ reservation.ReleasePartial(unused_start);
+ }
+ size_.Increment(reservation.size());
controller->TakeControl(&reservation);
return base;
}
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, base::VirtualMemory* controller) {
+ Executability executable, void* hint, base::VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
base::VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+ Address base =
+ ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
@@ -512,6 +520,23 @@ void Page::InitializeAsAnchor(Space* space) {
SetFlag(ANCHOR);
}
+Heap* MemoryChunk::synchronized_heap() {
+ return reinterpret_cast<Heap*>(
+ base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+}
+
+void MemoryChunk::InitializationMemoryFence() {
+ base::MemoryFence();
+#ifdef THREAD_SANITIZER
+ // Since TSAN does not process memory fences, we use the following annotation
+ // to tell TSAN that there is no data race when emitting a
+ // InitializationMemoryFence. Note that the other thread still needs to
+ // perform MemoryChunk::synchronized_heap().
+ base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
+ reinterpret_cast<base::AtomicWord>(heap_));
+#endif
+}
+
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
@@ -527,10 +552,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->slot_set_[OLD_TO_OLD].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_OLD].SetValue(nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
+ nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ nullptr);
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -554,10 +581,83 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
-
return chunk;
}
+template <Page::InitializationMode mode>
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ PagedSpace* owner) {
+ Page* page = reinterpret_cast<Page*>(chunk);
+ DCHECK(page->area_size() <= kAllocatableMemory);
+ DCHECK(chunk->owner() == owner);
+
+ owner->IncreaseCapacity(page->area_size());
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ // Make sure that categories are initialized before freeing the area.
+ page->InitializeFreeListCategories();
+ // In the case we do not free the memory, we effectively account for the whole
+ // page as allocated memory that cannot be used for further allocations.
+ if (mode == kFreeMemory) {
+ owner->Free(page->area_start(), page->area_size());
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ SemiSpace* owner) {
+ DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+ bool in_to_space = (owner->id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE);
+ DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ Page* page = static_cast<Page*>(chunk);
+ heap->incremental_marking()->SetNewSpacePageFlags(page);
+ page->AllocateLocalTracker();
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ MarkingState::External(page).ClearLiveness();
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner) {
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ // Initialize the owner field for each contained page (except the first, which
+ // is initialized by MemoryChunk::Initialize).
+ for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+ addr < chunk->area_end(); addr += Page::kPageSize) {
+ // Clear out kPageHeaderTag.
+ Memory::Address_at(addr) = 0;
+ }
+ LargePage* page = static_cast<LargePage*>(chunk);
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::ConvertNewToOld(Page* old_page) {
+ DCHECK(!old_page->is_anchor());
+ DCHECK(old_page->InNewSpace());
+ OldSpace* old_space = old_page->heap()->old_space();
+ old_page->set_owner(old_space);
+ old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_space->AccountCommitted(old_page->size());
+ Page* new_page = Page::Initialize<kDoNotFreeMemory>(
+ old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+ new_page->InsertAfter(old_space->anchor()->prev_page());
+ return new_page;
+}
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
@@ -634,22 +734,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
-void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
- DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
- DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
- Address free_start = chunk->area_end_ - bytes_to_shrink;
- // Don't adjust the size of the page. The area is just uncomitted but not
- // released.
- chunk->area_end_ -= bytes_to_shrink;
- UncommitBlock(free_start, bytes_to_shrink);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- if (chunk->reservation_.IsReserved())
- chunk->reservation_.Guard(chunk->area_end_);
- else
- base::OS::Guard(chunk->area_end_, GetCommitPageSize());
- }
-}
-
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -662,6 +746,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
base::VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
+ void* address_hint = heap->GetRandomMmapAddr();
//
// MemoryChunk layout:
@@ -721,7 +806,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
- &reservation);
+ address_hint, &reservation);
if (base == NULL) return NULL;
// Update executable memory size.
size_executable_.Increment(reservation.size());
@@ -742,7 +827,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, &reservation);
+ executable, address_hint, &reservation);
if (base == NULL) return NULL;
@@ -797,6 +882,11 @@ size_t Page::AvailableInFreeList() {
}
size_t Page::ShrinkToHighWaterMark() {
+ // Shrinking only makes sense outside of the CodeRange, where we don't care
+ // about address space fragmentation.
+ base::VirtualMemory* reservation = reserved_memory();
+ if (!reservation->IsReserved()) return 0;
+
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
@@ -826,6 +916,7 @@ size_t Page::ShrinkToHighWaterMark() {
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
+ DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
reinterpret_cast<void*>(this),
@@ -836,7 +927,8 @@ size_t Page::ShrinkToHighWaterMark() {
filler->address(),
static_cast<int>(area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
- heap()->memory_allocator()->ShrinkChunk(this, unused);
+ heap()->memory_allocator()->PartialFreeMemory(
+ this, address() + size() - unused, unused, area_end() - unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), area_end());
}
@@ -850,8 +942,9 @@ void Page::CreateBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ static_cast<int>(end - start));
}
void Page::DestroyBlackArea(Address start, Address end) {
@@ -861,29 +954,33 @@ void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->ClearRange(
AddressToMarkbitIndex(start), AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- -static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
- Address start_free) {
- // We do not allow partial shrink for code.
- DCHECK(chunk->executable() == NOT_EXECUTABLE);
-
- intptr_t size;
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free,
+ Address new_area_end) {
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- size = static_cast<intptr_t>(reservation->size());
-
- size_t to_free_size = size - (start_free - chunk->address());
-
- DCHECK(size_.Value() >= to_free_size);
- size_.Decrement(to_free_size);
+ chunk->size_ -= bytes_to_free;
+ chunk->area_end_ = new_area_end;
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
+ static_cast<uintptr_t>(GetCommitPageSize()));
+ DCHECK_EQ(chunk->address() + chunk->size(),
+ chunk->area_end() + CodePageGuardSize());
+ reservation->Guard(chunk->area_end_);
+ }
+ // On e.g. Windows, a reservation may be larger than a page and releasing
+ // partially starting at |start_free| will also release the potentially
+ // unused part behind the current page.
+ const size_t released_bytes = reservation->ReleasePartial(start_free);
+ DCHECK_GE(size_.Value(), released_bytes);
+ size_.Decrement(released_bytes);
isolate_->counters()->memory_allocated()->Decrement(
- static_cast<int>(to_free_size));
- chunk->set_size(size - to_free_size);
-
- reservation->ReleasePartial(start_free);
+ static_cast<int>(released_bytes));
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
@@ -1071,7 +1168,7 @@ size_t MemoryAllocator::CodePageAreaEndOffset() {
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+ DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return base::OS::CommitPageSize();
@@ -1111,6 +1208,10 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
+bool MemoryChunk::contains_array_buffers() {
+ return local_tracker() != nullptr && !local_tracker()->IsEmpty();
+}
+
void MemoryChunk::ReleaseAllocatedMemory() {
if (skip_list_ != nullptr) {
delete skip_list_;
@@ -1144,12 +1245,13 @@ template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
- if (!slot_set_[type].TrySetValue(nullptr, slot_set)) {
+ SlotSet* old_slot_set = base::AsAtomicWord::Release_CompareAndSwap(
+ &slot_set_[type], nullptr, slot_set);
+ if (old_slot_set != nullptr) {
delete[] slot_set;
- slot_set = slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ slot_set = old_slot_set;
}
+ DCHECK(slot_set);
return slot_set;
}
@@ -1158,10 +1260,10 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type].Value();
+ SlotSet* slot_set = slot_set_[type];
if (slot_set) {
+ slot_set_[type] = nullptr;
delete[] slot_set;
- slot_set_[type].SetValue(nullptr);
}
}
@@ -1170,14 +1272,15 @@ template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* slot_set = new TypedSlotSet(address());
- if (!typed_slot_set_[type].TrySetValue(nullptr, slot_set)) {
- delete slot_set;
- slot_set = typed_slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
+ TypedSlotSet* old_value = base::AsAtomicWord::Release_CompareAndSwap(
+ &typed_slot_set_[type], nullptr, typed_slot_set);
+ if (old_value != nullptr) {
+ delete typed_slot_set;
+ typed_slot_set = old_value;
}
- return slot_set;
+ DCHECK(typed_slot_set);
+ return typed_slot_set;
}
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -1185,10 +1288,10 @@ template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type].Value();
+ TypedSlotSet* typed_slot_set = typed_slot_set_[type];
if (typed_slot_set) {
+ typed_slot_set_[type] = nullptr;
delete typed_slot_set;
- typed_slot_set_[type].SetValue(nullptr);
}
}
@@ -1309,6 +1412,8 @@ void PagedSpace::RefillFreeList() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ base::LockGuard<base::Mutex> guard(mutex());
+
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
@@ -1403,9 +1508,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- // Do not account for the unused space as uncommitted because the counter
- // is kept in sync with page size which is also not adjusted for those
- // chunks.
+ AccountUncommitted(unused);
}
}
@@ -1496,8 +1599,9 @@ void PagedSpace::EmptyAllocationInfo() {
MarkingState::Internal(page).bitmap()->ClearRange(
page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
- MarkingState::Internal(page).IncrementLiveBytes(
- -static_cast<int>(current_limit - current_top));
+ MarkingState::Internal(page)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(current_limit - current_top));
}
}
@@ -1576,14 +1680,16 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object))) {
black_size += size;
}
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- CHECK_LE(black_size, MarkingState::Internal(page).live_bytes());
+ CHECK_LE(black_size,
+ MarkingState::Internal(page).live_bytes<AccessMode::ATOMIC>());
}
CHECK(allocation_pointer_found_in_space);
}
@@ -1595,7 +1701,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
bool NewSpace::SetUp(size_t initial_semispace_capacity,
size_t maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo32(
+ DCHECK(base::bits::IsPowerOfTwo(
static_cast<uint32_t>(maximum_semispace_capacity)));
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1681,7 +1787,6 @@ void NewSpace::Shrink() {
}
bool NewSpace::Rebalance() {
- CHECK(heap()->promotion_queue()->is_empty());
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
@@ -1777,6 +1882,8 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
+ original_top_.SetValue(top());
+ original_limit_.SetValue(limit());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1824,10 +1931,6 @@ bool NewSpace::AddFreshPage() {
// Clear remainder of current page.
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- if (heap()->gc_state() == Heap::SCAVENGE) {
- heap()->promotion_queue()->SetNewLimit(limit);
- }
-
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
UpdateAllocationInfo();
@@ -2214,7 +2317,6 @@ void SemiSpace::set_age_mark(Address mark) {
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
// Use the NewSpace::NewObjectIterator to iterate the ToSpace.
UNREACHABLE();
- return std::unique_ptr<ObjectIterator>();
}
#ifdef DEBUG
@@ -2702,7 +2804,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
owner_->EmptyAllocationInfo();
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kNoGCCallbackFlags);
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2838,7 +2940,7 @@ size_t FreeListCategory::SumFreeList() {
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
- sum += cur->nobarrier_size();
+ sum += cur->relaxed_read_size();
cur = cur->next();
}
return sum;
@@ -3140,15 +3242,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
AllocationStep(object->address(), object_size);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
- ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
+ ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object));
}
return object;
}
@@ -3240,18 +3343,24 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = NULL;
+ LargePage* previous = nullptr;
LargePage* current = first_page_;
- while (current != NULL) {
+ while (current != nullptr) {
HeapObject* object = current->GetObject();
DCHECK(!ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
Address free_start;
if ((free_start = current->GetAddressToShrink()) != 0) {
- // TODO(hpayer): Perform partial free concurrently.
+ DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
- heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+ const size_t bytes_to_free =
+ current->size() - (free_start - current->address());
+ heap()->memory_allocator()->PartialFreeMemory(
+ current, free_start, bytes_to_free,
+ current->area_start() + object->Size());
+ size_ -= bytes_to_free;
+ AccountUncommitted(bytes_to_free);
}
previous = current;
current = current->next_page();
@@ -3259,7 +3368,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* page = current;
// Cut the chunk out from the chunk list.
current = current->next_page();
- if (previous == NULL) {
+ if (previous == nullptr) {
first_page_ = current;
} else {
previous->set_next_page(current);
@@ -3320,7 +3429,8 @@ void LargeObjectSpace::Verify() {
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsByteArray() || object->IsFreeSpace());
+ object->IsPropertyArray() || object->IsByteArray() ||
+ object->IsFreeSpace());
// The object itself should look OK.
object->ObjectVerify();
@@ -3343,6 +3453,16 @@ void LargeObjectSpace::Verify() {
CHECK(element_object->map()->IsMap());
}
}
+ } else if (object->IsPropertyArray()) {
+ PropertyArray* array = PropertyArray::cast(object);
+ for (int j = 0; j < array->length(); j++) {
+ Object* property = array->get(j);
+ if (property->IsHeapObject()) {
+ HeapObject* property_object = HeapObject::cast(property);
+ CHECK(heap()->Contains(property_object));
+ CHECK(property_object->map()->IsMap());
+ }
+ }
}
}
}
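
A recurring theme in the hunks above is replacing V8's AtomicValue wrappers with raw pointers plus explicit release/acquire operations. In MemoryChunk::AllocateSlotSet and AllocateTypedSlotSet, the slot set is allocated lazily and published with a release compare-and-swap; a thread that loses the race deletes its copy and adopts the winner's. A minimal standalone sketch of that pattern, using std::atomic instead of V8's base::AsAtomicWord helpers (SlotSet, Chunk, and AllocateSlotSetOnce below are illustrative names, not from the patch):

    // Illustrative sketch only -- not part of the patch above.
    #include <atomic>

    struct SlotSet { /* slot storage elided */ };

    class Chunk {
     public:
      // Lazily allocate the slot set; safe to call from multiple threads.
      SlotSet* AllocateSlotSetOnce() {
        SlotSet* fresh = new SlotSet();
        SlotSet* expected = nullptr;
        // Try to publish our set. On failure, |expected| holds the set that
        // another thread already installed with release semantics.
        if (!slot_set_.compare_exchange_strong(expected, fresh,
                                               std::memory_order_acq_rel)) {
          delete fresh;      // lost the race; adopt the winner's set
          return expected;
        }
        return fresh;
      }

     private:
      std::atomic<SlotSet*> slot_set_{nullptr};
    };

The same publication idea underlies the new MemoryChunk::InitializationMemoryFence() and synchronized_heap() pair in the earlier hunks: a page is fully initialized on the allocating thread and only then made visible, so a concurrent reader that acquire-loads heap_ observes a completely initialized chunk. The patch itself uses base::MemoryFence() plus a TSAN annotation; a rough std::atomic equivalent of the publish/consume pair (field and type names hypothetical) might look like:

    #include <atomic>

    struct Heap;

    class ChunkHeader {
     public:
      void Initialize(Heap* heap) {
        // ... set up all other chunk fields first ...
        // Publish: every write above becomes visible to any thread that
        // subsequently acquire-loads heap_.
        heap_.store(heap, std::memory_order_release);
      }

      Heap* synchronized_heap() const {
        return heap_.load(std::memory_order_acquire);
      }

     private:
      std::atomic<Heap*> heap_{nullptr};
    };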