Diffstat (limited to 'deps/v8/test/cctest/test-spaces.cc')
-rw-r--r--  deps/v8/test/cctest/test-spaces.cc  248
1 file changed, 248 insertions(+), 0 deletions(-)
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
new file mode 100644
index 0000000000..d946a7fa54
--- /dev/null
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -0,0 +1,248 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+static void VerifyRSet(Address page_start) {
+#ifdef DEBUG
+ Page::set_rset_state(Page::IN_USE);
+#endif
+
+ Page* p = Page::FromAddress(page_start);
+
+ p->ClearRSet();
+
+ for (Address addr = p->ObjectAreaStart();
+ addr < p->ObjectAreaEnd();
+ addr += kPointerSize) {
+ CHECK(!Page::IsRSetSet(addr, 0));
+ }
+
+ for (Address addr = p->ObjectAreaStart();
+ addr < p->ObjectAreaEnd();
+ addr += kPointerSize) {
+ Page::SetRSet(addr, 0);
+ }
+
+ for (Address addr = p->ObjectAreaStart();
+ addr < p->ObjectAreaEnd();
+ addr += kPointerSize) {
+ CHECK(Page::IsRSetSet(addr, 0));
+ }
+}
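+
+// Illustrative sketch (not V8's actual layout): the remembered set that
+// VerifyRSet exercises is conceptually a bitmap with one bit per
+// pointer-sized slot in the page's object area.  The helpers below show the
+// address-to-bit arithmetic under that assumption; the names are
+// hypothetical and exist only for exposition.
+static inline int RSetBitIndexSketch(Address page_start, Address slot) {
+  // One bit per kPointerSize-aligned slot, counted from the page start.
+  return static_cast<int>((slot - page_start) / kPointerSize);
+}
+static inline bool TestRSetBitSketch(const byte* bitmap, int i) {
+  // Bit i lives at position (i % 8) of byte (i / 8) in the bitmap.
+  return ((bitmap[i >> 3] >> (i & 7)) & 1) != 0;
+}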
+
+
+TEST(Page) {
+#ifdef DEBUG
+ Page::set_rset_state(Page::NOT_IN_USE);
+#endif
+
+ byte* mem = NewArray<byte>(2*Page::kPageSize);
+ CHECK(mem != NULL);
+
+ Address start = reinterpret_cast<Address>(mem);
+ Address page_start = RoundUp(start, Page::kPageSize);
+
+ Page* p = Page::FromAddress(page_start);
+ CHECK(p->address() == page_start);
+ CHECK(p->is_valid());
+
+ p->opaque_header = 0;
+ p->is_normal_page = 0x1;
+ CHECK(!p->next_page()->is_valid());
+
+ CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
+ CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);
+
+ CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
+ Page::kObjectStartOffset);
+ CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);
+
+ CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
+ CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());
+
+  // Test the remembered set.
+ VerifyRSet(page_start);
+
+ DeleteArray(mem);
+}
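+
+// The test above over-allocates 2 * kPageSize bytes and rounds up: any block
+// of 2 * N bytes must contain an N-aligned run of N bytes.  V8's own RoundUp
+// performs the rounding; the sketch below shows the classic power-of-two
+// arithmetic it relies on (the helper name and uintptr_t cast are
+// illustrative, not V8 code, and assume 'alignment' is a power of two):
+static inline uintptr_t RoundUpToPowerOf2Sketch(uintptr_t value,
+                                                uintptr_t alignment) {
+  // Adding alignment - 1 carries any remainder into the next multiple;
+  // masking off the low bits then lands exactly on a multiple of 'alignment'.
+  return (value + alignment - 1) & ~(alignment - 1);
+}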
+
+
+TEST(MemoryAllocator) {
+ CHECK(Heap::ConfigureHeapDefault());
+ CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+
+ OldSpace faked_space(Heap::MaxCapacity(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ int total_pages = 0;
+ int requested = 2;
+ int allocated;
+ // If we request two pages, we should get one or two.
+ Page* first_page =
+ MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
+ CHECK(first_page->is_valid());
+ CHECK(allocated > 0 && allocated <= 2);
+ total_pages += allocated;
+
+ Page* last_page = first_page;
+ for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
+ CHECK(MemoryAllocator::IsPageInSpace(p, &faked_space));
+ last_page = p;
+ }
+
+ // Again, we should get one or two pages.
+ Page* others =
+ MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
+ CHECK(others->is_valid());
+ CHECK(allocated > 0 && allocated <= 2);
+ total_pages += allocated;
+
+ MemoryAllocator::SetNextPage(last_page, others);
+ int page_count = 0;
+ for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
+ CHECK(MemoryAllocator::IsPageInSpace(p, &faked_space));
+ page_count++;
+ }
+ CHECK(total_pages == page_count);
+
+ Page* second_page = first_page->next_page();
+ CHECK(second_page->is_valid());
+
+  // Freeing pages in the first chunk starting at or after the second page
+ // should free the entire second chunk. It will return the last page in the
+ // first chunk (if the second page was in the first chunk) or else an
+ // invalid page (if the second page was the start of the second chunk).
+ Page* free_return = MemoryAllocator::FreePages(second_page);
+ CHECK(free_return == last_page || !free_return->is_valid());
+ MemoryAllocator::SetNextPage(first_page, free_return);
+
+ // Freeing pages in the first chunk starting at the first page should free
+ // the first chunk and return an invalid page.
+ Page* invalid_page = MemoryAllocator::FreePages(first_page);
+ CHECK(!invalid_page->is_valid());
+
+ MemoryAllocator::TearDown();
+}
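+
+// The traversal pattern used twice in the test above, pulled out as a sketch
+// for readability (the test inlines this loop; the helper itself is
+// illustrative only):
+static int CountValidPagesSketch(Page* first) {
+  int count = 0;
+  // next_page() on the last page yields an invalid sentinel, ending the walk.
+  for (Page* p = first; p->is_valid(); p = p->next_page()) count++;
+  return count;
+}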
+
+
+TEST(NewSpace) {
+ CHECK(Heap::ConfigureHeapDefault());
+ CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+
+ NewSpace new_space;
+
+ void* chunk =
+ MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+ CHECK(chunk != NULL);
+ Address start = RoundUp(static_cast<Address>(chunk),
+ Heap::YoungGenerationSize());
+ CHECK(new_space.Setup(start, Heap::YoungGenerationSize()));
+ CHECK(new_space.HasBeenSetup());
+
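+  // Fill the new space with maximal-size objects.  Each successful
+  // AllocateRaw consumes kMaxHeapObjectSize bytes, so Available() strictly
+  // decreases and the loop terminates once no maximal object fits.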
+ while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+ Object* obj = new_space.AllocateRaw(Page::kMaxHeapObjectSize);
+ CHECK(!obj->IsFailure());
+ CHECK(new_space.Contains(HeapObject::cast(obj)));
+ }
+
+ new_space.TearDown();
+ MemoryAllocator::TearDown();
+}
+
+
+TEST(OldSpace) {
+ CHECK(Heap::ConfigureHeapDefault());
+ CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+
+ OldSpace* s = new OldSpace(Heap::OldGenerationSize(),
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
+ CHECK(s != NULL);
+
+ void* chunk =
+ MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+ CHECK(chunk != NULL);
+ Address start = static_cast<Address>(chunk);
+ size_t size = RoundUp(start, Heap::YoungGenerationSize()) - start;
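+  // Note: 'size' is the distance from 'start' up to the next
+  // YoungGenerationSize boundary, so only the unaligned head of the
+  // reserved chunk is handed to the old space.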
+
+ CHECK(s->Setup(start, size));
+
+ while (s->Available() > 0) {
+ Object* obj = s->AllocateRaw(Page::kMaxHeapObjectSize);
+ CHECK(!obj->IsFailure());
+ }
+
+ s->TearDown();
+ delete s;
+ MemoryAllocator::TearDown();
+}
+
+
+TEST(LargeObjectSpace) {
+ CHECK(Heap::Setup(false));
+
+ LargeObjectSpace* lo = Heap::lo_space();
+ CHECK(lo != NULL);
+
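+  // A fake map pointer: HeapObject::FromAddress(0) just tags address 0,
+  // giving set_map() a non-null Map* without allocating a real map object.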
+ Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
+ int lo_size = Page::kPageSize;
+
+ Object* obj = lo->AllocateRaw(lo_size);
+ CHECK(!obj->IsFailure());
+ CHECK(obj->IsHeapObject());
+
+ HeapObject* ho = HeapObject::cast(obj);
+ ho->set_map(faked_map);
+
+ CHECK(lo->Contains(HeapObject::cast(obj)));
+
+ CHECK(lo->FindObject(ho->address()) == obj);
+
+ CHECK(lo->Contains(ho));
+
+ while (true) {
+ int available = lo->Available();
+ obj = lo->AllocateRaw(lo_size);
+ if (obj->IsFailure()) break;
+ HeapObject::cast(obj)->set_map(faked_map);
+ CHECK(lo->Available() < available);
+  }
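+  // The CHECK inside the loop makes the invariant explicit: every successful
+  // allocation strictly decreases Available(), so the exhaustion loop above
+  // cannot run forever.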
+
+ CHECK(!lo->IsEmpty());
+
+ obj = lo->AllocateRaw(lo_size);
+ CHECK(obj->IsFailure());
+
+ lo->TearDown();
+ delete lo;
+
+ MemoryAllocator::TearDown();
+}