summaryrefslogtreecommitdiff
path: root/chromium/v8/src/heap
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2016-08-25 10:44:03 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2016-08-25 09:40:07 +0000
commite20ba3c57b50674f625b5088faa0fe9a076c0617 (patch)
tree3006142b83866a52a56d34ade8446d5044647305 /chromium/v8/src/heap
parent28b1110370900897ab652cb420c371fab8857ad4 (diff)
downloadqtwebengine-chromium-e20ba3c57b50674f625b5088faa0fe9a076c0617.tar.gz
BASELINE: Update Chromium to 53.0.2785.80
Also adds 3rdparty libraries under pdfium. Change-Id: I29afb23f1642fa55765d056697d5d145afa22bb2 Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--chromium/v8/src/heap/heap.cc10
-rw-r--r--chromium/v8/src/heap/spaces.cc21
-rw-r--r--chromium/v8/src/heap/spaces.h14
3 files changed, 36 insertions, 9 deletions
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 5806ce10161..c59a8d3d373 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -3122,14 +3122,8 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
if (lo_space()->Contains(object)) return false;
- Page* page = Page::FromAddress(address);
- // We can move the object start if:
- // (1) the object is not in old space,
- // (2) the page of the object was already swept,
- // (3) the page was already concurrently swept. This case is an optimization
- // for concurrent sweeping. The WasSwept predicate for concurrently swept
- // pages is set after sweeping all pages.
- return !InOldSpace(object) || page->SweepingDone();
+ // We can move the object start if the page was already swept.
+ return Page::FromAddress(address)->SweepingDone();
}
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 6b47c95bb08..1dcd044cd5e 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -4,6 +4,8 @@
#include "src/heap/spaces.h"
+#include <utility>
+
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
@@ -348,6 +350,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+ ReconsiderDelayedChunks();
if (FLAG_concurrent_sweeping) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
@@ -381,6 +384,24 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
}
+void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
+ std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
+ // Move constructed, so the permanent list should be empty.
+ DCHECK(delayed_regular_chunks_.empty());
+ for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
+ AddMemoryChunkSafe<kRegular>(*it);
+ }
+}
+
+bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
+ MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
+ // We cannot free memory chunks in new space while the sweeper is running
+ // since a sweeper thread might be stuck right before trying to lock the
+ // corresponding page.
+ return !chunk->InNewSpace() || (mc == nullptr) ||
+ mc->sweeper().IsSweepingCompleted();
+}
+
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::VirtualMemory::CommitRegion(base, size,
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index e68fc8ef233..04c89a8fab9 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -1347,7 +1347,12 @@ class MemoryAllocator {
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
- chunks_[type].push_back(chunk);
+ if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
+ chunks_[type].push_back(chunk);
+ } else {
+ DCHECK_EQ(type, kRegular);
+ delayed_regular_chunks_.push_back(chunk);
+ }
}
template <ChunkQueueType type>
@@ -1359,11 +1364,16 @@ class MemoryAllocator {
return chunk;
}
+ void ReconsiderDelayedChunks();
void PerformFreeMemoryOnQueuedChunks();
base::Mutex mutex_;
MemoryAllocator* allocator_;
std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ // Delayed chunks cannot be processed in the current unmapping cycle because
+ // of dependencies such as an active sweeper.
+ // See MemoryAllocator::CanFreeMemoryChunk.
+ std::list<MemoryChunk*> delayed_regular_chunks_;
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
@@ -1402,6 +1412,8 @@ class MemoryAllocator {
template <MemoryAllocator::FreeMode mode = kFull>
void Free(MemoryChunk* chunk);
+ bool CanFreeMemoryChunk(MemoryChunk* chunk);
+
// Returns allocated spaces in bytes.
intptr_t Size() { return size_.Value(); }