author     Sergio Ahumada <sergio.ahumada@digia.com>  2013-03-19 09:25:14 +0100
committer  Sergio Ahumada <sergio.ahumada@digia.com>  2013-03-19 09:56:31 +0100
commit     6313e1fe4c27755adde87e62db1c2f9fac534ae4 (patch)
tree       c57bb29f65e02fbfcc07895a8cc2903fff9300ba /src/3rdparty/v8/src/incremental-marking.h
parent     b5a49a260d03249c386f1b63c249089383dd81fa (diff)
parent     cac65e7a222b848a735a974b0aeb43209b0cfa18 (diff)
download   qtjsbackend-6313e1fe4c27755adde87e62db1c2f9fac534ae4.tar.gz
Merge branch 'dev' into stable
This starts Qt 5.1 release cycle

Change-Id: I892bbc73c276842894a720f761ce31ad1b015672
Diffstat (limited to 'src/3rdparty/v8/src/incremental-marking.h')
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.h  51
1 file changed, 28 insertions(+), 23 deletions(-)
diff --git a/src/3rdparty/v8/src/incremental-marking.h b/src/3rdparty/v8/src/incremental-marking.h
index 39e8dae..6ae0f59 100644
--- a/src/3rdparty/v8/src/incremental-marking.h
+++ b/src/3rdparty/v8/src/incremental-marking.h
@@ -53,6 +53,8 @@ class IncrementalMarking {
explicit IncrementalMarking(Heap* heap);
+ static void Initialize();
+
void TearDown();
State state() {
@@ -93,21 +95,23 @@ class IncrementalMarking {
// progress in the face of the mutator creating new work for it. We start
// off at a moderate rate of work and gradually increase the speed of the
// incremental marker until it completes.
- // Do some marking every time this much memory has been allocated.
+ // Do some marking every time this much memory has been allocated or that many
+ // heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 65536;
// Start off by marking this many times more memory than has been allocated.
- static const intptr_t kInitialAllocationMarkingFactor = 1;
+ static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
// with the data that is entering the old space through promotion.
static const intptr_t kFastMarking = 3;
// After this many steps we increase the marking/allocating factor.
- static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+ static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
// This is how much we increase the marking/allocating factor by.
- static const intptr_t kAllocationMarkingFactorSpeedup = 2;
- static const intptr_t kMaxAllocationMarkingFactor = 1000;
+ static const intptr_t kMarkingSpeedAccelleration = 2;
+ static const intptr_t kMaxMarkingSpeed = 1000;
void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+ Step(allocated * kFastMarking / kInitialMarkingSpeed,
GC_VIA_STACK_GUARD);
}
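
The hunk above captures the pacing policy of the incremental marker: a step fires once kAllocatedThreshold bytes have been allocated (or, with this change, once kWriteBarriersInvokedThreshold heavy write barriers have been invoked), each step marks a multiple of the allocated bytes, and that multiple is raised periodically up to kMaxMarkingSpeed. A minimal C++ sketch of that policy follows; the class, the method names, and the additive speed-up are illustrative assumptions rather than the actual V8 implementation, and only the numeric values mirror the header.

#include <cstdint>
using std::intptr_t;

// Hypothetical pacing model; only the constants' values come from the header.
class MarkingPacerSketch {
 public:
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 65536;
  static const intptr_t kInitialMarkingSpeed = 1;
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;

  // A step is due once enough bytes have been allocated or enough heavy
  // (color-checking) write barriers have been invoked since the last step.
  bool StepIsDue(intptr_t allocated, intptr_t barriers_invoked) const {
    return allocated >= kAllocatedThreshold ||
           barriers_invoked >= kWriteBarriersInvokedThreshold;
  }

  // Each step marks speed_ times as many bytes as were allocated since the
  // previous step, so the marker stays ahead of the mutator.
  intptr_t BytesToMark(intptr_t allocated) const { return allocated * speed_; }

  // Every kMarkingSpeedAccellerationInterval steps, raise the speed (shown
  // here as an additive bump, one reading of the header comment) and cap it
  // at kMaxMarkingSpeed.
  void MaybeSpeedUp(int steps_count) {
    if (steps_count > 0 &&
        steps_count % kMarkingSpeedAccellerationInterval == 0) {
      intptr_t next = speed_ + kMarkingSpeedAccelleration;
      speed_ = next > kMaxMarkingSpeed ? kMaxMarkingSpeed : next;
    }
  }

 private:
  intptr_t speed_ = kInitialMarkingSpeed;
};

In the header itself, OldSpaceStep's Step(allocated * kFastMarking / kInitialMarkingSpeed, ...) simply scales old-space allocation by the fast-marking factor before charging it to the marker.
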
@@ -130,6 +134,12 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
+ // Record a slot for compaction. Returns false for objects that are
+ // guaranteed to be rescanned or not guaranteed to survive.
+ //
+ // No slots in white objects should be recorded, as some slots are typed and
+ // cannot be interpreted correctly if the underlying object does not survive
+ // the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj,
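
The comment added above states the contract of BaseRecordWrite: a slot is only worth recording for compaction if its holder is neither certain to be rescanned nor liable to die (stay white) during the incremental cycle. The sketch below restates that contract with stand-in types; the Color enum, the ObjSketch struct, and the treatment of grey holders as "guaranteed to be rescanned" are assumptions for illustration, not V8's mark-bit representation or actual logic.

// Stand-in marking colors; V8 encodes these as mark bits, not an enum.
enum class Color { kWhite, kGrey, kBlack };

struct ObjSketch {
  Color color = Color::kWhite;
};

// Returns true only when recording the slot is both safe and useful.
bool ShouldRecordSlotSketch(const ObjSketch& holder) {
  // Never record slots in white holders: if the holder stays white (does not
  // survive the cycle), a typed slot inside it cannot be interpreted later.
  if (holder.color == Color::kWhite) return false;
  // Grey holders are still on the marking deque and will be rescanned as a
  // whole, so recording their individual slots would be redundant.
  if (holder.color == Color::kGrey) return false;
  // Black holder: record the slot so the compactor can update it.
  return true;
}
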
@@ -167,16 +177,6 @@ class IncrementalMarking {
return true;
}
- // Marks the object grey and pushes it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
inline int steps_count() {
return steps_count_;
}
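
The two helpers removed in the hunk above were documented as "mark grey and push" versus "mark black without push". For readers new to tri-color marking, the self-contained sketch below shows that distinction with placeholder types; none of it is V8 code.

#include <deque>

enum class MarkColor { kWhite, kGrey, kBlack };

struct ObjectSketch {
  MarkColor color = MarkColor::kWhite;
};

// Grey the object and queue it so its fields get scanned later.
// Returns true if the object actually needed marking.
bool MarkGreyAndPushSketch(ObjectSketch* obj,
                           std::deque<ObjectSketch*>* marking_deque) {
  if (obj->color != MarkColor::kWhite) return false;  // already marked
  obj->color = MarkColor::kGrey;
  marking_deque->push_back(obj);
  return true;
}

// Black the object immediately, without queuing, for objects whose fields do
// not need to be scanned by the marker.
bool MarkBlackWithoutPushSketch(ObjectSketch* obj) {
  if (obj->color != MarkColor::kWhite) return false;  // already marked
  obj->color = MarkColor::kBlack;
  return true;
}
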
@@ -213,12 +213,13 @@ class IncrementalMarking {
void NotifyOfHighPromotionRate() {
if (IsMarking()) {
- if (allocation_marking_factor_ < kFastMarking) {
+ if (marking_speed_ < kFastMarking) {
if (FLAG_trace_gc) {
- PrintF("Increasing marking speed to %d due to high promotion rate\n",
- static_cast<int>(kFastMarking));
+ PrintPID("Increasing marking speed to %d "
+ "due to high promotion rate\n",
+ static_cast<int>(kFastMarking));
}
- allocation_marking_factor_ = kFastMarking;
+ marking_speed_ = kFastMarking;
}
}
}
@@ -258,7 +259,11 @@ class IncrementalMarking {
void EnsureMarkingDequeIsCommitted();
- void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+ INLINE(void ProcessMarkingDeque());
+
+ INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
+
+ INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
Heap* heap_;
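
The new ProcessMarkingDeque(intptr_t bytes_to_process) overload suggests a byte-budgeted drain of the marking deque. A rough, self-contained sketch of such a loop follows; the EntrySketch type, the std::deque stand-in, and the visit placeholder are assumptions, not the real MarkingDeque or VisitObject.

#include <cstdint>
#include <deque>
using std::intptr_t;

struct EntrySketch {
  int size;  // object size in bytes
};

// Pops and processes entries until the byte budget is spent or the deque is
// empty; returns how many bytes were actually processed.
intptr_t ProcessMarkingDequeSketch(std::deque<EntrySketch*>* deque,
                                   intptr_t bytes_to_process) {
  intptr_t processed = 0;
  while (!deque->empty() && processed < bytes_to_process) {
    EntrySketch* obj = deque->back();
    deque->pop_back();
    // The real marker would do something like VisitObject(map, obj, size)
    // here to scan the object's fields and mark it black.
    processed += obj->size;
  }
  return processed;
}
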
@@ -268,7 +273,6 @@ class IncrementalMarking {
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
- Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;
@@ -279,9 +283,10 @@ class IncrementalMarking {
double steps_took_since_last_gc_;
int64_t bytes_rescanned_;
bool should_hurry_;
- int allocation_marking_factor_;
+ int marking_speed_;
intptr_t bytes_scanned_;
intptr_t allocated_;
+ intptr_t write_barriers_invoked_since_last_step_;
int no_marking_scope_depth_;