Diffstat (limited to 'chromium/v8/src/objects/backing-store.cc')
 chromium/v8/src/objects/backing-store.cc | 282 ++++++++++++++++++-------
 1 file changed, 204 insertions(+), 78 deletions(-)
diff --git a/chromium/v8/src/objects/backing-store.cc b/chromium/v8/src/objects/backing-store.cc
index 08288ef62c0..77eaf3a722c 100644
--- a/chromium/v8/src/objects/backing-store.cc
+++ b/chromium/v8/src/objects/backing-store.cc
@@ -37,6 +37,14 @@ namespace {
constexpr size_t kPlatformMaxPages =
std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
+constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
+
+#if V8_TARGET_ARCH_64_BIT
+constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
+#endif
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
@@ -50,12 +58,6 @@ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
-constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
-
-#if V8_TARGET_ARCH_64_BIT
-constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
-#endif
-
std::atomic<uint64_t> reserved_address_space_{0};
// Allocation results are reported to UMA
@@ -75,7 +77,7 @@ enum class AllocationStatus {
base::AddressRegion GetReservedRegion(bool has_guard_regions,
void* buffer_start,
size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
if (has_guard_regions) {
// Guard regions always look like this:
// |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
@@ -97,7 +99,7 @@ base::AddressRegion GetReservedRegion(bool has_guard_regions,
}
size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
if (has_guard_regions) return kFullGuardSize;
#else
DCHECK(!has_guard_regions);
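
For reference, a standalone sketch (illustration only, not part of this patch) of the reservation arithmetic implied by the constants and the layout comment above: a guarded 64-bit reservation is 10 GiB total, split into a 2 GiB negative guard, the 4 GiB addressable window, and a 4 GiB positive guard, with buffer_start offset 2 GiB into the allocation. The allocation_base value is hypothetical.

// Standalone sketch of the guarded-reservation layout, mirroring
// kNegativeGuardSize / kFullGuardSize above.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t GiB = uint64_t{1} << 30;
  constexpr uint64_t kNegativeGuardSize = 2 * GiB;  // guard below buffer_start
  constexpr uint64_t kAddressable = 4 * GiB;        // addressable window
  constexpr uint64_t kPositiveGuardSize = 4 * GiB;  // guard above the buffer
  constexpr uint64_t kFullGuardSize =
      kNegativeGuardSize + kAddressable + kPositiveGuardSize;  // 10 GiB
  assert(kFullGuardSize == 10 * GiB);

  // Hypothetical allocation base; AllocatePages() picks the real one.
  uint64_t allocation_base = 0x200000000000;
  // buffer_start skips the negative guard (see "skipping negative guard
  // region" in the allocation path further down in this diff).
  uint64_t buffer_start = allocation_base + kNegativeGuardSize;
  // GetReservedRegion() recovers the full reservation from buffer_start.
  uint64_t region_begin = buffer_start - kNegativeGuardSize;
  assert(region_begin == allocation_base);

  std::printf("reservation [%#llx, %#llx), buffer_start %#llx\n",
              static_cast<unsigned long long>(region_begin),
              static_cast<unsigned long long>(region_begin + kFullGuardSize),
              static_cast<unsigned long long>(buffer_start));
}
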
@@ -110,7 +112,6 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
isolate->counters()->wasm_memory_allocation_result()->AddSample(
static_cast<int>(status));
}
-#endif // V8_ENABLE_WEBASSEMBLY
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
@@ -160,6 +161,8 @@ BackingStore::~BackingStore() {
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
+ // TODO(v8:11111): RAB / GSAB - Wasm integration.
+ DCHECK(!is_resizable_);
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
size_t reservation_size =
@@ -189,6 +192,23 @@ BackingStore::~BackingStore() {
}
#endif // V8_ENABLE_WEBASSEMBLY
+ if (is_resizable_) {
+ DCHECK(free_on_destruct_);
+ DCHECK(!custom_deleter_);
+ size_t reservation_size =
+ GetReservationSize(has_guard_regions_, byte_capacity_);
+ auto region =
+ GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
+
+ bool pages_were_freed =
+ region.size() == 0 /* no need to free any pages */ ||
+ FreePages(GetPlatformPageAllocator(),
+ reinterpret_cast<void*>(region.begin()), region.size());
+ CHECK(pages_were_freed);
+ BackingStore::ReleaseReservation(reservation_size);
+ Clear();
+ return;
+ }
if (custom_deleter_) {
DCHECK(free_on_destruct_);
TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
@@ -252,15 +272,16 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
}
}
- auto result = new BackingStore(buffer_start, // start
- byte_length, // length
- byte_length, // capacity
- shared, // shared
- false, // is_wasm_memory
- true, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ true, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), byte_length);
@@ -281,6 +302,27 @@ void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
}
#if V8_ENABLE_WEBASSEMBLY
+// Allocate a backing store for a Wasm memory. Always use the page allocator
+// and add guard regions.
+std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
+ Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+ SharedFlag shared) {
+ // Compute size of reserved memory.
+ size_t engine_max_pages = wasm::max_mem_pages();
+ maximum_pages = std::min(engine_max_pages, maximum_pages);
+
+ auto result = TryAllocateAndPartiallyCommitMemory(
+ isolate, initial_pages * wasm::kWasmPageSize, wasm::kWasmPageSize,
+ initial_pages, maximum_pages, true, shared);
+ // Shared Wasm memories need an anchor for the memory object list.
+ if (result && shared == SharedFlag::kShared) {
+ result->type_specific_data_.shared_wasm_memory_data =
+ new SharedWasmMemoryData();
+ }
+ return result;
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
uint64_t reservation_limit = kAddressSpaceLimit;
uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
@@ -300,17 +342,21 @@ void BackingStore::ReleaseReservation(uint64_t num_bytes) {
DCHECK_LE(num_bytes, old_reserved);
}
-// Allocate a backing store for a Wasm memory. Always use the page allocator
-// and add guard regions.
-std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
- Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
+ Isolate* isolate, size_t byte_length, size_t page_size,
+ size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
SharedFlag shared) {
+ // Enforce engine limitation on the maximum number of pages.
+ if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
+ return nullptr;
+ }
+
// Cannot reserve 0 pages on some OSes.
if (maximum_pages == 0) maximum_pages = 1;
TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
- bool guards = trap_handler::IsTrapHandlerEnabled();
+ bool guards = is_wasm_memory && trap_handler::IsTrapHandlerEnabled();
// For accounting purposes, whether a GC was necessary.
bool did_retry = false;
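
The early return added above protects the later maximum_pages * page_size multiplication from size_t overflow. A minimal standalone illustration of that check (CapacityFits and the sample values are hypothetical, not from this patch):

// Overflow-safe capacity check, in the spirit of the early return above.
#include <cstddef>
#include <cstdio>
#include <limits>

// Returns true iff pages * page_size fits in size_t without wrapping.
bool CapacityFits(size_t pages, size_t page_size) {
  return pages <= std::numeric_limits<size_t>::max() / page_size;
}

int main() {
  constexpr size_t kWasmPageSize = 64 * 1024;  // 64 KiB Wasm pages
  std::printf("65536 pages: %s\n",
              CapacityFits(65536, kWasmPageSize) ? "ok" : "overflow");
  // SIZE_MAX pages * 64 KiB would wrap around, so allocation must bail out.
  std::printf("SIZE_MAX pages: %s\n",
              CapacityFits(std::numeric_limits<size_t>::max(), kWasmPageSize)
                  ? "ok"
                  : "overflow");
}
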
@@ -329,16 +375,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
return false;
};
- // Compute size of reserved memory.
-
- size_t engine_max_pages = wasm::max_mem_pages();
- maximum_pages = std::min(engine_max_pages, maximum_pages);
- // If the platform doesn't support so many pages, attempting to allocate
- // is guaranteed to fail, so we don't even try.
- if (maximum_pages > kPlatformMaxPages) return {};
- CHECK_LE(maximum_pages,
- std::numeric_limits<size_t>::max() / wasm::kWasmPageSize);
- size_t byte_capacity = maximum_pages * wasm::kWasmPageSize;
+ size_t byte_capacity = maximum_pages * page_size;
size_t reservation_size = GetReservationSize(guards, byte_capacity);
//--------------------------------------------------------------------------
@@ -366,7 +403,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
auto allocate_pages = [&] {
allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
- wasm::kWasmPageSize, PageAllocator::kNoAccess);
+ page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
@@ -379,23 +416,27 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
+#if V8_ENABLE_WEBASSEMBLY
byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
(guards ? kNegativeGuardSize : 0);
-
+#else
+ DCHECK(!guards);
+ byte* buffer_start = reinterpret_cast<byte*>(allocation_base);
+#endif
//--------------------------------------------------------------------------
// 3. Commit the initial pages (allow read/write).
//--------------------------------------------------------------------------
- size_t byte_length = initial_pages * wasm::kWasmPageSize;
+ size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
- return byte_length == 0 ||
- SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
- PageAllocator::kReadWrite);
+ return committed_byte_length == 0 ||
+ SetPermissions(GetPlatformPageAllocator(), buffer_start,
+ committed_byte_length, PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
- byte_length);
+ committed_byte_length);
// SetPermissions put us over the process memory limit.
- V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
+ V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateMemory()");
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
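
A rough POSIX analogy (not V8 code; V8 goes through its PageAllocator) for the reserve-then-commit pattern in steps 2 and 3 above: reserve the full capacity with no access rights, then grant read/write only on the initially committed pages. The sizes below are made up for illustration.

// POSIX sketch of "reserve big, commit small": analogous to AllocatePages()
// with kNoAccess followed by SetPermissions() with kReadWrite above.
#include <sys/mman.h>
#include <cstdio>
#include <cstring>

int main() {
  const size_t kReservation = size_t{1} << 30;  // reserve 1 GiB of addresses
  const size_t kCommitted = 64 * 1024;          // commit one 64 KiB "page"

  // Step 2 analogue: reserve address space only, no usable permissions yet.
  void* base = mmap(nullptr, kReservation, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // Step 3 analogue: commit the initial pages by granting read/write.
  if (mprotect(base, kCommitted, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, kReservation);
    return 1;
  }

  std::memset(base, 0, kCommitted);  // touching committed bytes is now legal
  std::printf("reserved %zu bytes at %p, committed %zu\n", kReservation, base,
              kCommitted);
  munmap(base, kReservation);
  return 0;
}
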
@@ -403,30 +444,29 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
- auto result = new BackingStore(buffer_start, // start
- byte_length, // length
- byte_capacity, // capacity
- shared, // shared
- true, // is_wasm_memory
- true, // free_on_destruct
- guards, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ ResizableFlag resizable =
+ is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
+
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_capacity, // capacity
+ shared, // shared
+ resizable, // resizable
+ is_wasm_memory, // is_wasm_memory
+ true, // free_on_destruct
+ guards, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS(
"BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
result, result->buffer_start(), byte_length, byte_capacity,
reservation_size);
- // Shared Wasm memories need an anchor for the memory object list.
- if (shared == SharedFlag::kShared) {
- result->type_specific_data_.shared_wasm_memory_data =
- new SharedWasmMemoryData();
- }
-
return std::unique_ptr<BackingStore>(result);
}
+#if V8_ENABLE_WEBASSEMBLY
// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
@@ -583,18 +623,102 @@ void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+// Commit already reserved memory (for RAB backing stores (not shared)).
+BackingStore::ResizeOrGrowResult BackingStore::ResizeInPlace(
+ Isolate* isolate, size_t new_byte_length, size_t new_committed_length) {
+ DCHECK_LE(new_byte_length, new_committed_length);
+ DCHECK(!is_shared());
+
+ if (new_byte_length < byte_length_) {
+ // TODO(v8:11111): Figure out a strategy for shrinking - when do we
+ // un-commit the memory?
+
+ // Zero the memory so that in case the buffer is grown later, we have
+ // zeroed the contents already.
+ memset(reinterpret_cast<byte*>(buffer_start_) + new_byte_length, 0,
+ byte_length_ - new_byte_length);
+
+ // Changing the byte length wouldn't strictly speaking be needed, since
+ // the JSArrayBuffer already stores the updated length. This is to keep
+ // the BackingStore and JSArrayBuffer in sync.
+ byte_length_ = new_byte_length;
+ return kSuccess;
+ }
+ if (new_byte_length == byte_length_) {
+ // i::SetPermissions with size 0 fails on some platforms, so special
+ // handling for the case byte_length_ == new_byte_length == 0 is required.
+ return kSuccess;
+ }
+
+ // Try to adjust the permissions on the memory.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
+ new_committed_length, PageAllocator::kReadWrite)) {
+ return kFailure;
+ }
+
+ // Do per-isolate accounting for non-shared backing stores.
+ DCHECK(free_on_destruct_);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(new_byte_length - byte_length_);
+ byte_length_ = new_byte_length;
+ return kSuccess;
+}
+
+// Commit already reserved memory (for GSAB backing stores (shared)).
+BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
+ Isolate* isolate, size_t new_byte_length, size_t new_committed_length) {
+ DCHECK_LE(new_byte_length, new_committed_length);
+ DCHECK(is_shared());
+ // See comment in GrowWasmMemoryInPlace.
+ // GrowableSharedArrayBuffer.prototype.grow can be called from several
+ // threads. If two threads try to grow() in a racy way, the spec allows the
+ // larger grow to throw also if the smaller grow succeeds first. The
+ // implementation below doesn't throw in that case - instead, it retries and
+ // succeeds. If the larger grow finishes first though, the smaller grow must
+ // throw.
+ size_t old_byte_length = byte_length_.load(std::memory_order_seq_cst);
+ while (true) {
+ if (new_byte_length < old_byte_length) {
+ // The caller checks for the new_byte_length < old_byte_length_ case. This
+ // can only happen if another thread grew the memory after that.
+ return kRace;
+ }
+ if (new_byte_length == old_byte_length) {
+ // i::SetPermissions with size 0 fails on some platforms, so special
+ // handling for the case old_byte_length == new_byte_length == 0 is
+ // required.
+ return kSuccess;
+ }
+
+ // Try to adjust the permissions on the memory.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
+ new_committed_length, PageAllocator::kReadWrite)) {
+ return kFailure;
+ }
+
+ // compare_exchange_weak updates old_byte_length.
+ if (byte_length_.compare_exchange_weak(old_byte_length, new_byte_length,
+ std::memory_order_seq_cst)) {
+ // Successfully updated both the length and permissions.
+ break;
+ }
+ }
+ return kSuccess;
+}
+
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
- auto result = new BackingStore(allocation_base, // start
- allocation_length, // length
- allocation_length, // capacity
- shared, // shared
- false, // is_wasm_memory
- free_on_destruct, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(allocation_base, // start
+ allocation_length, // length
+ allocation_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ free_on_destruct, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
result->SetAllocatorFromIsolate(isolate);
TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), result->byte_length());
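
The retry loop in GrowInPlace added above is a standard compare-and-swap pattern. A self-contained, simplified sketch of just the atomic length update (GrowTo and byte_length are hypothetical names; SetPermissions and the committed region are omitted), showing how a racing smaller grow either reports kRace or a racing equal/larger grow retries and succeeds:

// Simplified sketch of the GrowInPlace retry loop: only the atomic length
// update is modeled here.
#include <atomic>
#include <cstdio>

enum class GrowResult { kSuccess, kRace };

std::atomic<size_t> byte_length{0};

GrowResult GrowTo(size_t new_byte_length) {
  size_t old_byte_length = byte_length.load(std::memory_order_seq_cst);
  while (true) {
    if (new_byte_length < old_byte_length) return GrowResult::kRace;
    if (new_byte_length == old_byte_length) return GrowResult::kSuccess;
    // (Real code widens the committed region here before publishing.)
    // compare_exchange_weak reloads old_byte_length on failure, so a
    // concurrent grow to a smaller-or-equal size is simply retried.
    if (byte_length.compare_exchange_weak(old_byte_length, new_byte_length,
                                          std::memory_order_seq_cst)) {
      return GrowResult::kSuccess;
    }
  }
}

int main() {
  std::printf("grow to 4096: %s\n",
              GrowTo(4096) == GrowResult::kSuccess ? "success" : "race");
  // A later, smaller grow must report the race, matching the spec behaviour
  // described in the comment in GrowInPlace above.
  std::printf("grow to 1024: %s\n",
              GrowTo(1024) == GrowResult::kSuccess ? "success" : "race");
}
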
@@ -606,10 +730,11 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
- auto result = new BackingStore(allocation_base, // start
- allocation_length, // length
- allocation_length, // capacity
- shared, // shared
+ auto result = new BackingStore(allocation_base, // start
+ allocation_length, // length
+ allocation_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
false, // is_wasm_memory
true, // free_on_destruct
false, // has_guard_regions
@@ -623,15 +748,16 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
SharedFlag shared) {
- auto result = new BackingStore(nullptr, // start
- 0, // length
- 0, // capacity
- shared, // shared
- false, // is_wasm_memory
- true, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(nullptr, // start
+ 0, // length
+ 0, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ true, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
return std::unique_ptr<BackingStore>(result);
}