author     Ben Noordhuis <info@bnoordhuis.nl>  2011-11-02 16:58:08 +0100
committer  Ben Noordhuis <info@bnoordhuis.nl>  2011-11-02 16:58:35 +0100
commit     edea4122b1c725a9f7873c02fe04100995472ddc (patch)
tree       3334347495150cfd3a68909489689c112457ae07 /deps/v8/src/spaces.cc
parent     cc9223406837e7610b5f36b16b6a0e51861370cb (diff)
Revert "Upgrade V8 to 3.7.1"
This reverts commit 92f5a5d3caf01f382f90c235e9057590a5e76870. V8 3.7.1 in debug mode on ia32 has a curious race-like bug where an fs.Stats object is not fully formed until some time after it's created. This is easy to demonstrate by running `make test-debug`. V8 3.7.0 does not exhibit this behaviour so back we go. Fixes #1981.
Diffstat (limited to 'deps/v8/src/spaces.cc')
-rw-r--r--  deps/v8/src/spaces.cc  39
1 file changed, 15 insertions(+), 24 deletions(-)
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f467f710c..61b318118 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -95,6 +95,10 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
cur_end_ = end;
page_mode_ = mode;
size_func_ = size_f;
+
+#ifdef DEBUG
+ Verify();
+#endif
}
@@ -119,6 +123,13 @@ bool HeapObjectIterator::AdvanceToNextPage() {
}
+#ifdef DEBUG
+void HeapObjectIterator::Verify() {
+ // TODO(gc): We should do something here.
+}
+#endif
+
+
// -----------------------------------------------------------------------------
// CodeRange
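
The two hunks above add a DEBUG-only Verify() hook to HeapObjectIterator: Initialize() now calls Verify(), which is an empty TODO(gc) stub compiled only into debug builds. A minimal standalone sketch of that pattern follows; the class and field names are illustrative only, not the actual V8 implementation.

```cpp
#include <cassert>

// Hypothetical iterator; cur_addr_/cur_end_ echo the V8 field names.
class SimpleIterator {
 public:
  SimpleIterator(const char* start, const char* end)
      : cur_addr_(start), cur_end_(end) {
#ifdef DEBUG
    Verify();  // Runs only in debug builds, like the hook added above.
#endif
  }

 private:
#ifdef DEBUG
  void Verify() {
    // The real stub is an empty TODO(gc); a cheap sanity check stands in here.
    assert(cur_addr_ <= cur_end_);
  }
#endif

  const char* cur_addr_;
  const char* cur_end_;
};
```
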
@@ -1898,24 +1909,11 @@ intptr_t FreeList::SumFreeLists() {
bool NewSpace::ReserveSpace(int bytes) {
// We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size. The limit can be set lower than
- // the end of new space either because there is more space on the next page
- // or because we have lowered the limit in order to get periodic incremental
- // marking. The most reliable way to ensure that there is linear space is
- // to do the allocation, then rewind the limit.
+ // space than the minimum NewSpace size.
ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRawInternal(bytes);
- Object* object = NULL;
- if (!maybe->ToObject(&object)) return false;
- HeapObject* allocation = HeapObject::cast(object);
+ Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
- if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
- return true;
- }
- // There may be a borderline case here where the allocation succeeded, but
- // the limit and top have moved on to a new page. In that case we try again.
- return ReserveSpace(bytes);
+ return limit - top >= bytes;
}
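
The reverted ReserveSpace no longer performs a trial allocation and rewinds the top pointer; it simply reports whether the remaining linear allocation area (limit - top) can hold the request. A simplified, self-contained sketch of that check, with an illustrative struct standing in for V8's allocation_info_:

```cpp
#include <cstdint>

// Illustrative stand-in for V8's allocation_info_: top is the next free byte,
// limit is the end of the current linear allocation area.
struct AllocationInfo {
  std::uintptr_t top;
  std::uintptr_t limit;
};

// Mirrors the reverted check: the reservation succeeds iff the request fits
// in the space left between top and limit.
bool ReserveSpace(const AllocationInfo& info, int bytes) {
  return info.limit - info.top >= static_cast<std::uintptr_t>(bytes);
}
```
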
@@ -2280,11 +2278,8 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- max_capacity_(max_capacity),
first_page_(NULL),
size_(0),
page_count_(0),
@@ -2324,10 +2319,6 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
return Failure::RetryAfterGC(identity());
}
- if (Size() + object_size > max_capacity_) {
- return Failure::RetryAfterGC(identity());
- }
-
LargePage* page = heap()->isolate()->memory_allocator()->
AllocateLargePage(object_size, executable, this);
if (page == NULL) return Failure::RetryAfterGC(identity());
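
The last two hunks drop LargeObjectSpace's max_capacity_ field and the early-out in AllocateRaw that returned Failure::RetryAfterGC once the space would exceed that cap; after the revert, only a failed large-page allocation signals a retry. A hedged sketch of the guard that was removed, with illustrative names and a simplified result type rather than V8's Failure machinery:

```cpp
#include <cstdint>

// Illustrative result: retry_after_gc = true means the caller should collect
// garbage and attempt the allocation again.
struct LargeAllocResult {
  bool retry_after_gc;
};

// The 3.7.1 code failed early when the space would grow past the cap; the
// reverted 3.7.0 code has no such cap and relies on page allocation failing.
LargeAllocResult CheckLargeObjectCapacity(std::intptr_t current_size,
                                          std::intptr_t max_capacity,
                                          int object_size) {
  if (current_size + object_size > max_capacity) {
    return {true};   // over the cap: retry after GC (removed by this revert)
  }
  return {false};    // within the cap: proceed to allocate a large page
}
```
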