summaryrefslogtreecommitdiff
path: root/deps/v8/src/spaces-inl.h
diff options
context:
space:
mode:
authorBen Noordhuis <info@bnoordhuis.nl>2013-11-10 02:02:27 +0100
committerBen Noordhuis <info@bnoordhuis.nl>2013-11-11 02:40:36 +0100
commitf230a1cf749e984439b5bb9729d9db9f48472827 (patch)
tree153596de2251b717ad79823f23fabf4c140d6d35 /deps/v8/src/spaces-inl.h
parenta12870c823b9b67110b27a470fcac342cf1dfbd6 (diff)
downloadnode-f230a1cf749e984439b5bb9729d9db9f48472827.tar.gz
v8: upgrade to 3.22.24
This commit removes the simple/test-event-emitter-memory-leak test for being unreliable with the new garbage collector: the memory pressure exerted by the test case is too low for the garbage collector to kick in. It can be made to work again by limiting the heap size with the --max_old_space_size=x flag but that won't be very reliable across platforms and architectures.
Diffstat (limited to 'deps/v8/src/spaces-inl.h')
-rw-r--r--deps/v8/src/spaces-inl.h | 38
1 file changed, 28 insertions, 10 deletions
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index be2ae2a57..d5c114c5b 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -28,6 +28,7 @@
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"
@@ -263,22 +264,28 @@ void Page::set_prev_page(Page* page) {
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > allocation_info_.limit()) return NULL;
- allocation_info_.top = new_top;
+ allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationType event) {
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -291,6 +298,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -299,6 +309,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -311,31 +324,36 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
if (FLAG_stress_compaction && !heap()->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
heap()->one_pointer_filler_map();
}
old_top += filler_size;
- allocation_info_.top += filler_size;
+ allocation_info_.set_top(allocation_info_.top() + filler_size);
}
}
#endif
- if (allocation_info_.limit - old_top < size_in_bytes) {
+ if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
+ HeapObject* obj = HeapObject::FromAddress(old_top);
+ allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+ if (profiler != NULL && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(obj->address(), size_in_bytes);
+ }
+
return obj;
}