Diffstat (limited to 'deps/v8/src/spaces.cc')
-rw-r--r--  deps/v8/src/spaces.cc  30
1 file changed, 16 insertions, 14 deletions
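This change converts the allocation entry points in spaces.cc (PagedSpace::FindObject, the free-list Allocate routines, and the LargeObjectSpace allocators) from returning Object* to returning MaybeObject*, and switches their callers from an IsFailure() check to ToObject(). Below is a minimal sketch of that return-type pattern; the classes are simplified stand-ins for illustration, not the real V8 declarations (in V8, Failure::RetryAfterGC(owner_) also records the owning space that should be collected).

// A sketch of the MaybeObject* pattern applied by this diff. These are
// simplified stand-in classes, not V8's actual definitions.
class Object;

class MaybeObject {
 public:
  virtual ~MaybeObject() {}
  virtual bool IsFailure() const { return false; }
  // Yields the underlying Object* only on success; callers branch on the
  // return value instead of remembering to test IsFailure() separately.
  bool ToObject(Object** obj);
};

class Object : public MaybeObject {};

class Failure : public MaybeObject {
 public:
  bool IsFailure() const override { return true; }
  // Stand-in for Failure::RetryAfterGC(owner_): "no memory, collect first".
  static Failure* RetryAfterGC() {
    static Failure failure;
    return &failure;
  }
};

bool MaybeObject::ToObject(Object** obj) {
  if (IsFailure()) return false;
  *obj = static_cast<Object*>(this);
  return true;
}

// An allocator in the new style: the MaybeObject* return type makes the
// possibility of failure explicit at every call site.
MaybeObject* AllocateFromFreeList(bool free_list_is_empty) {
  if (free_list_is_empty) {
    return Failure::RetryAfterGC();  // mirrors FixedSizeFreeList::Allocate()
  }
  static Object block;               // stand-in for a carved-out heap block
  return &block;
}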
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 5bdbcc7842..e3fb923312 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -873,7 +873,7 @@ void PagedSpace::MarkAllPagesClean() {
}
-Object* PagedSpace::FindObject(Address addr) {
+MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called before or after mark-compact GC
// because it accesses map pointers.
ASSERT(!MarkCompactCollector::in_use());
@@ -1804,7 +1804,7 @@ int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
}
-Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
+MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
ASSERT(0 < size_in_bytes);
ASSERT(size_in_bytes <= kMaxBlockSize);
ASSERT(IsAligned(size_in_bytes, kPointerSize));
@@ -1924,7 +1924,7 @@ void FixedSizeFreeList::Free(Address start) {
}
-Object* FixedSizeFreeList::Allocate() {
+MaybeObject* FixedSizeFreeList::Allocate() {
if (head_ == NULL) {
return Failure::RetryAfterGC(owner_);
}
@@ -2187,9 +2187,10 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// is currently forbidden.
if (!Heap::linear_allocation()) {
int wasted_bytes;
- Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ Object* result;
+ MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
- if (!result->IsFailure()) {
+ if (maybe->ToObject(&result)) {
accounting_stats_.AllocateBytes(size_in_bytes);
HeapObject* obj = HeapObject::cast(result);
@@ -2495,8 +2496,9 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
if (!Heap::linear_allocation()) {
- Object* result = free_list_.Allocate();
- if (!result->IsFailure()) {
+ Object* result;
+ MaybeObject* maybe = free_list_.Allocate();
+ if (maybe->ToObject(&result)) {
accounting_stats_.AllocateBytes(size_in_bytes);
HeapObject* obj = HeapObject::cast(result);
Page* p = Page::FromAddress(obj->address());
@@ -2745,9 +2747,9 @@ void LargeObjectSpace::Unprotect() {
#endif
-Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
- int object_size,
- Executability executable) {
+MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
+ int object_size,
+ Executability executable) {
ASSERT(0 < object_size && object_size <= requested_size);
// Check if we want to force a GC before growing the old space further.
@@ -2783,7 +2785,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
}
-Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
+MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
@@ -2791,7 +2793,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
}
-Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
@@ -2799,7 +2801,7 @@ Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
}
-Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
@@ -2808,7 +2810,7 @@ Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
// GC support
-Object* LargeObjectSpace::FindObject(Address a) {
+MaybeObject* LargeObjectSpace::FindObject(Address a) {
for (LargeObjectChunk* chunk = first_chunk_;
chunk != NULL;
chunk = chunk->next()) {
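The caller-side conversion performed in the OldSpace::SlowAllocateRaw and FixedSpace::SlowAllocateRaw hunks above follows a single idiom: instead of receiving an Object* and testing IsFailure(), the caller receives a MaybeObject* and unwraps it with ToObject(&result). A stand-alone before/after sketch (again with hypothetical stand-in types, not the real V8 classes):

#include <cstdio>

struct Object {};

// Stand-in for V8's MaybeObject; nullptr plays the role of a Failure here.
struct MaybeObject {
  Object* value;
  bool IsFailure() const { return value == nullptr; }
  bool ToObject(Object** out) {
    if (IsFailure()) return false;
    *out = value;
    return true;
  }
};

// Hypothetical allocator used only to drive the example.
MaybeObject* Allocate(bool succeed) {
  static Object obj;
  static MaybeObject ok{&obj};
  static MaybeObject failed{nullptr};
  return succeed ? &ok : &failed;
}

int main() {
  // Old style (before this change): the result was used as an Object*, and
  // forgetting the IsFailure() test silently treated a Failure as an object.
  MaybeObject* result = Allocate(true);
  if (!result->IsFailure()) {
    std::printf("old-style check: success\n");
  }

  // New style (after this change): the failure test is fused with the
  // unwrap, so the Object* only exists once success has been checked.
  Object* obj = nullptr;
  MaybeObject* maybe = Allocate(false);
  if (maybe->ToObject(&obj)) {
    std::printf("new-style check: success\n");
  } else {
    std::printf("new-style check: retry after GC\n");
  }
  return 0;
}

The intent of the switch is that a MaybeObject* cannot be used where an Object* is expected, so an allocation whose failure check has been forgotten is caught by the type system rather than surfacing as heap corruption later.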